summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.clang-format43
-rw-r--r--CREDITS20
-rw-r--r--Documentation/ABI/stable/sysfs-driver-mlxreg-io6
-rw-r--r--Documentation/ABI/testing/sysfs-block9
-rw-r--r--Documentation/ABI/testing/sysfs-block-zram11
-rw-r--r--Documentation/admin-guide/README.rst32
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt7
-rw-r--r--Documentation/block/bfq-iosched.txt7
-rw-r--r--Documentation/block/null_blk.txt3
-rw-r--r--Documentation/block/queue-sysfs.txt7
-rw-r--r--Documentation/blockdev/zram.txt74
-rw-r--r--Documentation/bpf/bpf_design_QA.rst11
-rw-r--r--Documentation/core-api/xarray.rst15
-rw-r--r--Documentation/devicetree/bindings/Makefile6
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-capacity.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/idle-states.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/sp810.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/topology.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/actions,owl-cmu.txt7
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5433-clock.txt23
-rw-r--r--Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt24
-rw-r--r--Documentation/devicetree/bindings/clock/imx8mm-clock.txt29
-rw-r--r--Documentation/devicetree/bindings/clock/marvell,mmp2.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmcc.txt1
-rw-r--r--Documentation/devicetree/bindings/display/arm,pl11x.txt2
-rw-r--r--Documentation/devicetree/bindings/display/msm/gpu.txt1
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mvebu.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt2
-rw-r--r--Documentation/devicetree/bindings/reset/socfpga-reset.txt3
-rw-r--r--Documentation/devicetree/bindings/reset/uniphier-reset.txt25
-rw-r--r--Documentation/devicetree/bindings/serio/olpc,ap-sp.txt4
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt4
-rw-r--r--Documentation/driver-model/bus.txt8
-rw-r--r--Documentation/driver-model/devres.txt2
-rw-r--r--Documentation/fb/fbcon.txt8
-rw-r--r--Documentation/features/core/cBPF-JIT/arch-support.txt1
-rw-r--r--Documentation/features/core/eBPF-JIT/arch-support.txt1
-rw-r--r--Documentation/features/core/generic-idle-thread/arch-support.txt1
-rw-r--r--Documentation/features/core/jump-labels/arch-support.txt1
-rw-r--r--Documentation/features/core/tracehook/arch-support.txt1
-rw-r--r--Documentation/features/debug/KASAN/arch-support.txt1
-rw-r--r--Documentation/features/debug/gcov-profile-all/arch-support.txt1
-rw-r--r--Documentation/features/debug/kgdb/arch-support.txt1
-rw-r--r--Documentation/features/debug/kprobes-on-ftrace/arch-support.txt1
-rw-r--r--Documentation/features/debug/kprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/kretprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/optprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/stackprotector/arch-support.txt1
-rw-r--r--Documentation/features/debug/uprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/user-ret-profiler/arch-support.txt1
-rw-r--r--Documentation/features/io/dma-contiguous/arch-support.txt1
-rw-r--r--Documentation/features/locking/cmpxchg-local/arch-support.txt1
-rw-r--r--Documentation/features/locking/lockdep/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-rwlocks/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-spinlocks/arch-support.txt1
-rw-r--r--Documentation/features/locking/rwsem-optimized/arch-support.txt1
-rw-r--r--Documentation/features/perf/kprobes-event/arch-support.txt1
-rw-r--r--Documentation/features/perf/perf-regs/arch-support.txt1
-rw-r--r--Documentation/features/perf/perf-stackdump/arch-support.txt1
-rw-r--r--Documentation/features/sched/membarrier-sync-core/arch-support.txt1
-rw-r--r--Documentation/features/sched/numa-balancing/arch-support.txt1
-rw-r--r--Documentation/features/seccomp/seccomp-filter/arch-support.txt1
-rw-r--r--Documentation/features/time/arch-tick-broadcast/arch-support.txt1
-rw-r--r--Documentation/features/time/clockevents/arch-support.txt1
-rw-r--r--Documentation/features/time/context-tracking/arch-support.txt1
-rw-r--r--Documentation/features/time/irq-time-acct/arch-support.txt1
-rw-r--r--Documentation/features/time/modern-timekeeping/arch-support.txt1
-rw-r--r--Documentation/features/time/virt-cpuacct/arch-support.txt1
-rw-r--r--Documentation/features/vm/ELF-ASLR/arch-support.txt1
-rw-r--r--Documentation/features/vm/PG_uncached/arch-support.txt1
-rw-r--r--Documentation/features/vm/THP/arch-support.txt1
-rw-r--r--Documentation/features/vm/TLB/arch-support.txt1
-rw-r--r--Documentation/features/vm/huge-vmap/arch-support.txt1
-rw-r--r--Documentation/features/vm/ioremap_prot/arch-support.txt1
-rw-r--r--Documentation/features/vm/numa-memblock/arch-support.txt1
-rw-r--r--Documentation/features/vm/pte_special/arch-support.txt1
-rw-r--r--Documentation/filesystems/sysfs.txt4
-rw-r--r--Documentation/networking/dsa/dsa.txt10
-rw-r--r--Documentation/networking/index.rst26
-rw-r--r--Documentation/networking/msg_zerocopy.rst2
-rw-r--r--Documentation/networking/operstates.txt14
-rw-r--r--Documentation/networking/rxrpc.txt45
-rw-r--r--Documentation/networking/snmp_counter.rst130
-rw-r--r--Documentation/networking/switchdev.txt10
-rw-r--r--Documentation/networking/timestamping.txt4
-rw-r--r--Documentation/process/applying-patches.rst117
-rw-r--r--Documentation/sysctl/fs.txt28
-rw-r--r--Documentation/trace/coresight-cpu-debug.txt2
-rw-r--r--Documentation/translations/it_IT/admin-guide/README.rst2
-rw-r--r--Documentation/virtual/kvm/amd-memory-encryption.rst2
-rw-r--r--Documentation/x86/resctrl_ui.txt2
-rw-r--r--MAINTAINERS146
-rw-r--r--Makefile10
-rw-r--r--arch/alpha/include/asm/irq.h6
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arc/Kconfig20
-rw-r--r--arch/arc/configs/nps_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig2
-rw-r--r--arch/arc/include/asm/Kbuild4
-rw-r--r--arch/arc/include/asm/arcregs.h20
-rw-r--r--arch/arc/include/asm/bitops.h6
-rw-r--r--arch/arc/include/asm/cache.h11
-rw-r--r--arch/arc/include/asm/entry-arcv2.h54
-rw-r--r--arch/arc/include/asm/perf_event.h3
-rw-r--r--arch/arc/include/asm/uaccess.h8
-rw-r--r--arch/arc/kernel/entry-arcv2.S4
-rw-r--r--arch/arc/kernel/head.S16
-rw-r--r--arch/arc/kernel/intc-arcv2.c2
-rw-r--r--arch/arc/kernel/perf_event.c241
-rw-r--r--arch/arc/kernel/setup.c144
-rw-r--r--arch/arc/kernel/troubleshoot.c30
-rw-r--r--arch/arc/lib/memcpy-archs.S14
-rw-r--r--arch/arc/lib/memset-archs.S40
-rw-r--r--arch/arc/mm/fault.c13
-rw-r--r--arch/arc/mm/init.c3
-rw-r--r--arch/arc/plat-hsdk/Kconfig1
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts2
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts4
-rw-r--r--arch/arm/boot/dts/am335x-shc.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-db.dts46
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts13
-rw-r--r--arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts85
-rw-r--r--arch/arm/boot/dts/da850-evm.dts31
-rw-r--r--arch/arm/boot/dts/da850-lcdk.dts38
-rw-r--r--arch/arm/boot/dts/da850.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q-pistachio.dts2
-rw-r--r--arch/arm/boot/dts/imx6sll-evk.dts2
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi2
-rw-r--r--arch/arm/boot/dts/kirkwood-dnskw.dtsi4
-rw-r--r--arch/arm/boot/dts/meson.dtsi2
-rw-r--r--arch/arm/boot/dts/meson8b-ec100.dts3
-rw-r--r--arch/arm/boot/dts/meson8b-odroidc1.dts4
-rw-r--r--arch/arm/boot/dts/meson8m2-mxiii-plus.dts3
-rw-r--r--arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts2
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi42
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts11
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi9
-rw-r--r--arch/arm/boot/dts/omap5-cm-t54.dts12
-rw-r--r--arch/arm/boot/dts/omap5-l4.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7743.dtsi36
-rw-r--r--arch/arm/boot/dts/rk3188.dtsi1
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi1
-rw-r--r--arch/arm/boot/dts/sun8i-h3-beelink-x2.dts2
-rw-r--r--arch/arm/boot/dts/tegra124-nyan.dtsi17
-rw-r--r--arch/arm/boot/dts/vf610-bk4.dts4
-rw-r--r--arch/arm/include/asm/irq.h1
-rw-r--r--arch/arm/include/asm/kvm_host.h10
-rw-r--r--arch/arm/include/asm/stage2_pgtable.h5
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h94
-rw-r--r--arch/arm/kernel/irq.c62
-rw-r--r--arch/arm/kernel/smp.c2
-rw-r--r--arch/arm/kvm/coproc.c4
-rw-r--r--arch/arm/kvm/reset.c24
-rw-r--r--arch/arm/mach-cns3xxx/pcie.c4
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c4
-rw-r--r--arch/arm/mach-integrator/impd1.c8
-rw-r--r--arch/arm/mach-iop32x/n2100.c3
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c16
-rw-r--r--arch/arm/mach-omap2/display.c7
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c36
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c16
-rw-r--r--arch/arm/mach-socfpga/socfpga.c4
-rw-r--r--arch/arm/mach-tango/pm.c6
-rw-r--r--arch/arm/mach-tango/pm.h7
-rw-r--r--arch/arm/mach-tango/setup.c2
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/plat-pxa/ssp.c3
-rw-r--r--arch/arm/probes/kprobes/opt-arm.c2
-rw-r--r--arch/arm/xen/mm.c1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts3
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-evk.dts44
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806.dtsi17
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi3
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796.dtsi3
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts2
-rw-r--r--arch/arm64/configs/defconfig4
-rw-r--r--arch/arm64/include/asm/asm-prototypes.h2
-rw-r--r--arch/arm64/include/asm/cache.h4
-rw-r--r--arch/arm64/include/asm/device.h3
-rw-r--r--arch/arm64/include/asm/kvm_host.h11
-rw-r--r--arch/arm64/include/asm/memory.h11
-rw-r--r--arch/arm64/include/asm/mmu.h44
-rw-r--r--arch/arm64/include/asm/neon-intrinsics.h4
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h4
-rw-r--r--arch/arm64/include/asm/xen/page-coherent.h76
-rw-r--r--arch/arm64/kernel/cpu_errata.c2
-rw-r--r--arch/arm64/kernel/cpufeature.c9
-rw-r--r--arch/arm64/kernel/head.S4
-rw-r--r--arch/arm64/kernel/hibernate.c4
-rw-r--r--arch/arm64/kernel/hyp-stub.S2
-rw-r--r--arch/arm64/kernel/kaslr.c9
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c8
-rw-r--r--arch/arm64/kernel/probes/kprobes.c6
-rw-r--r--arch/arm64/kernel/ptrace.c15
-rw-r--r--arch/arm64/kernel/setup.c4
-rw-r--r--arch/arm64/kvm/hyp/switch.c5
-rw-r--r--arch/arm64/kvm/hyp/sysreg-sr.c5
-rw-r--r--arch/arm64/kvm/reset.c50
-rw-r--r--arch/arm64/kvm/sys_regs.c50
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/dump.c59
-rw-r--r--arch/arm64/mm/flush.c6
-rw-r--r--arch/arm64/mm/kasan_init.c2
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/csky/include/asm/io.h25
-rw-r--r--arch/csky/include/asm/pgalloc.h43
-rw-r--r--arch/csky/include/asm/pgtable.h9
-rw-r--r--arch/csky/include/asm/processor.h4
-rw-r--r--arch/csky/kernel/dumpstack.c4
-rw-r--r--arch/csky/kernel/module.c38
-rw-r--r--arch/csky/kernel/ptrace.c3
-rw-r--r--arch/csky/kernel/smp.c3
-rw-r--r--arch/csky/mm/ioremap.c14
-rw-r--r--arch/h8300/Makefile2
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/m68k/emu/nfblock.c10
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/mips/Kconfig16
-rw-r--r--arch/mips/bcm47xx/setup.c31
-rw-r--r--arch/mips/boot/dts/ingenic/ci20.dts8
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi2
-rw-r--r--arch/mips/boot/dts/xilfpga/nexys4ddr.dts8
-rw-r--r--arch/mips/cavium-octeon/setup.c2
-rw-r--r--arch/mips/configs/ath79_defconfig1
-rw-r--r--arch/mips/include/asm/atomic.h6
-rw-r--r--arch/mips/include/asm/barrier.h36
-rw-r--r--arch/mips/include/asm/bitops.h5
-rw-r--r--arch/mips/include/asm/futex.h3
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h2
-rw-r--r--arch/mips/include/asm/pgtable.h2
-rw-r--r--arch/mips/jazz/jazzdma.c5
-rw-r--r--arch/mips/kernel/mips-cm.c2
-rw-r--r--arch/mips/kernel/process.c7
-rw-r--r--arch/mips/lantiq/irq.c77
-rw-r--r--arch/mips/lantiq/xway/dma.c6
-rw-r--r--arch/mips/loongson64/Platform23
-rw-r--r--arch/mips/loongson64/common/reset.c7
-rw-r--r--arch/mips/mm/tlbex.c10
-rw-r--r--arch/mips/net/ebpf_jit.c24
-rw-r--r--arch/mips/pci/msi-octeon.c4
-rw-r--r--arch/mips/pci/pci-octeon.c10
-rw-r--r--arch/mips/vdso/Makefile5
-rw-r--r--arch/nds32/Makefile8
-rw-r--r--arch/openrisc/Makefile3
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/uaccess.h8
-rw-r--r--arch/parisc/kernel/ptrace.c29
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h26
-rw-r--r--arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--arch/powerpc/kernel/head_8xx.S3
-rw-r--r--arch/powerpc/kernel/signal_64.c7
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c7
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c22
-rw-r--r--arch/powerpc/perf/perf_regs.c6
-rw-r--r--arch/powerpc/platforms/4xx/ocm.c6
-rw-r--r--arch/powerpc/platforms/chrp/setup.c3
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c2
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci.c2
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c5
-rw-r--r--arch/powerpc/platforms/pseries/pci.c2
-rw-r--r--arch/powerpc/sysdev/fsl_rmu.c7
-rw-r--r--arch/riscv/Kconfig6
-rw-r--r--arch/riscv/configs/defconfig8
-rw-r--r--arch/riscv/include/asm/module.h28
-rw-r--r--arch/riscv/include/asm/page.h2
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h6
-rw-r--r--arch/riscv/include/asm/pgtable.h8
-rw-r--r--arch/riscv/include/asm/processor.h2
-rw-r--r--arch/riscv/include/asm/ptrace.h5
-rw-r--r--arch/riscv/include/asm/syscall.h10
-rw-r--r--arch/riscv/include/asm/thread_info.h6
-rw-r--r--arch/riscv/include/asm/unistd.h2
-rw-r--r--arch/riscv/kernel/asm-offsets.c1
-rw-r--r--arch/riscv/kernel/entry.S22
-rw-r--r--arch/riscv/kernel/module-sections.c30
-rw-r--r--arch/riscv/kernel/ptrace.c9
-rw-r--r--arch/riscv/kernel/setup.c11
-rw-r--r--arch/riscv/kernel/smp.c43
-rw-r--r--arch/riscv/kernel/smpboot.c6
-rw-r--r--arch/riscv/mm/init.c3
-rw-r--r--arch/s390/include/asm/mmu_context.h7
-rw-r--r--arch/s390/kernel/early.c4
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/smp.c11
-rw-r--r--arch/s390/kernel/swsusp.S4
-rw-r--r--arch/s390/kernel/vdso.c5
-rw-r--r--arch/s390/kvm/vsie.c2
-rw-r--r--arch/s390/pci/pci.c4
-rw-r--r--arch/sh/boot/dts/Makefile2
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/x86/Kconfig10
-rw-r--r--arch/x86/boot/compressed/head_64.S10
-rw-r--r--arch/x86/boot/compressed/pgtable.h2
-rw-r--r--arch/x86/entry/entry_64_compat.S6
-rw-r--r--arch/x86/events/core.c14
-rw-r--r--arch/x86/events/intel/core.c25
-rw-r--r--arch/x86/events/intel/uncore_snbep.c4
-rw-r--r--arch/x86/events/perf_event.h16
-rw-r--r--arch/x86/ia32/ia32_aout.c6
-rw-r--r--arch/x86/include/asm/intel-family.h5
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/mmu_context.h18
-rw-r--r--arch/x86/include/asm/page_64_types.h4
-rw-r--r--arch/x86/include/asm/pgtable.h2
-rw-r--r--arch/x86/include/asm/resctrl_sched.h4
-rw-r--r--arch/x86/include/asm/uaccess.h2
-rw-r--r--arch/x86/include/asm/uv/bios.h8
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/bugs.c4
-rw-r--r--arch/x86/kernel/cpu/mce/core.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/Makefile4
-rw-r--r--arch/x86/kernel/crash.c1
-rw-r--r--arch/x86/kernel/hpet.c4
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c5
-rw-r--r--arch/x86/kernel/kvm.c7
-rw-r--r--arch/x86/kernel/tsc.c30
-rw-r--r--arch/x86/kvm/Makefile4
-rw-r--r--arch/x86/kvm/cpuid.c4
-rw-r--r--arch/x86/kvm/hyperv.c7
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c19
-rw-r--r--arch/x86/kvm/svm.c34
-rw-r--r--arch/x86/kvm/trace.h2
-rw-r--r--arch/x86/kvm/vmx/evmcs.c7
-rw-r--r--arch/x86/kvm/vmx/nested.c30
-rw-r--r--arch/x86/kvm/vmx/vmx.c183
-rw-r--r--arch/x86/kvm/vmx/vmx.h10
-rw-r--r--arch/x86/kvm/x86.c17
-rw-r--r--arch/x86/lib/iomem.c33
-rw-r--r--arch/x86/lib/kaslr.c4
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c4
-rw-r--r--arch/x86/mm/pageattr.c50
-rw-r--r--arch/x86/platform/uv/bios_uv.c23
-rw-r--r--arch/x86/xen/enlighten_pv.c5
-rw-r--r--arch/x86/xen/time.c12
-rw-r--r--arch/xtensa/Kconfig4
-rw-r--r--arch/xtensa/boot/dts/Makefile6
-rw-r--r--arch/xtensa/configs/audio_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/cadence_csp_defconfig2
-rw-r--r--arch/xtensa/configs/generic_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/nommu_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/smp_lx200_defconfig3
-rw-r--r--arch/xtensa/kernel/head.S5
-rw-r--r--arch/xtensa/kernel/smp.c41
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--block/bfq-wf2q.c11
-rw-r--r--block/blk-core.c26
-rw-r--r--block/blk-flush.c2
-rw-r--r--block/blk-iolatency.c56
-rw-r--r--block/blk-mq-debugfs-zoned.c2
-rw-r--r--block/blk-mq-debugfs.c6
-rw-r--r--block/blk-mq.c15
-rw-r--r--block/blk-mq.h1
-rw-r--r--block/blk-wbt.c4
-rw-r--r--crypto/adiantum.c4
-rw-r--r--crypto/af_alg.c4
-rw-r--r--crypto/authenc.c14
-rw-r--r--crypto/authencesn.c2
-rw-r--r--crypto/sm3_generic.c2
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/arm64/iort.c5
-rw-r--r--drivers/acpi/bus.c27
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/nfit/core.c86
-rw-r--r--drivers/acpi/nfit/intel.c8
-rw-r--r--drivers/acpi/numa.c6
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c41
-rw-r--r--drivers/acpi/power.c22
-rw-r--r--drivers/android/binder.c37
-rw-r--r--drivers/android/binder_internal.h9
-rw-r--r--drivers/android/binderfs.c296
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_mvebu.c87
-rw-r--r--drivers/ata/libahci_platform.c13
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/pata_macio.c9
-rw-r--r--drivers/ata/sata_fsl.c4
-rw-r--r--drivers/ata/sata_inic162x.c22
-rw-r--r--drivers/atm/he.c41
-rw-r--r--drivers/atm/idt77252.c16
-rw-r--r--drivers/auxdisplay/ht16k33.c2
-rw-r--r--drivers/base/cacheinfo.c6
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/base/power/runtime.c23
-rw-r--r--drivers/base/regmap/regmap-irq.c8
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c35
-rw-r--r--drivers/block/nbd.c5
-rw-r--r--drivers/block/null_blk.h1
-rw-r--r--drivers/block/rbd.c9
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/block/zram/zram_drv.c90
-rw-r--r--drivers/block/zram/zram_drv.h5
-rw-r--r--drivers/bus/ti-sysc.c6
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c173
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c25
-rw-r--r--drivers/char/mwave/mwavedd.c7
-rw-r--r--drivers/clk/Kconfig7
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/actions/Kconfig5
-rw-r--r--drivers/clk/actions/Makefile1
-rw-r--r--drivers/clk/actions/owl-pll.c2
-rw-r--r--drivers/clk/actions/owl-pll.h30
-rw-r--r--drivers/clk/actions/owl-s500.c525
-rw-r--r--drivers/clk/at91/at91sam9x5.c5
-rw-r--r--drivers/clk/at91/sama5d2.c4
-rw-r--r--drivers/clk/at91/sama5d4.c2
-rw-r--r--drivers/clk/clk-clps711x.c61
-rw-r--r--drivers/clk/clk-devres.c11
-rw-r--r--drivers/clk/clk-fixed-mmio.c101
-rw-r--r--drivers/clk/clk-fractional-divider.c2
-rw-r--r--drivers/clk/clk-gpio.c39
-rw-r--r--drivers/clk/clk-highbank.c1
-rw-r--r--drivers/clk/clk-max77686.c28
-rw-r--r--drivers/clk/clk-qoriq.c5
-rw-r--r--drivers/clk/clk-stm32mp1.c37
-rw-r--r--drivers/clk/clk-twl6040.c53
-rw-r--r--drivers/clk/clk-versaclock5.c4
-rw-r--r--drivers/clk/clk.c19
-rw-r--r--drivers/clk/clkdev.c117
-rw-r--r--drivers/clk/imx/Kconfig6
-rw-r--r--drivers/clk/imx/Makefile4
-rw-r--r--drivers/clk/imx/clk-composite-8m.c2
-rw-r--r--drivers/clk/imx/clk-frac-pll.c5
-rw-r--r--drivers/clk/imx/clk-imx51-imx53.c1
-rw-r--r--drivers/clk/imx/clk-imx6q.c1
-rw-r--r--drivers/clk/imx/clk-imx6sx.c1
-rw-r--r--drivers/clk/imx/clk-imx7d.c1
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c16
-rw-r--r--drivers/clk/imx/clk-imx8mm.c675
-rw-r--r--drivers/clk/imx/clk-imx8mq.c254
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c2
-rw-r--r--drivers/clk/imx/clk-imx8qxp.c1
-rw-r--r--drivers/clk/imx/clk-pll14xx.c392
-rw-r--r--drivers/clk/imx/clk-sccg-pll.c514
-rw-r--r--drivers/clk/imx/clk-scu.c123
-rw-r--r--drivers/clk/imx/clk-scu.h16
-rw-r--r--drivers/clk/imx/clk-vf610.c1
-rw-r--r--drivers/clk/imx/clk.h38
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c1
-rw-r--r--drivers/clk/meson/Kconfig101
-rw-r--r--drivers/clk/meson/Makefile29
-rw-r--r--drivers/clk/meson/axg-aoclk.c193
-rw-r--r--drivers/clk/meson/axg-aoclk.h13
-rw-r--r--drivers/clk/meson/axg-audio.c5
-rw-r--r--drivers/clk/meson/axg.c69
-rw-r--r--drivers/clk/meson/clk-dualdiv.c138
-rw-r--r--drivers/clk/meson/clk-dualdiv.h33
-rw-r--r--drivers/clk/meson/clk-input.c7
-rw-r--r--drivers/clk/meson/clk-input.h19
-rw-r--r--drivers/clk/meson/clk-mpll.c12
-rw-r--r--drivers/clk/meson/clk-mpll.h30
-rw-r--r--drivers/clk/meson/clk-phase.c75
-rw-r--r--drivers/clk/meson/clk-phase.h26
-rw-r--r--drivers/clk/meson/clk-pll.c216
-rw-r--r--drivers/clk/meson/clk-pll.h49
-rw-r--r--drivers/clk/meson/clk-regmap.c5
-rw-r--r--drivers/clk/meson/clk-regmap.h20
-rw-r--r--drivers/clk/meson/clk-triphase.c68
-rw-r--r--drivers/clk/meson/clkc.h127
-rw-r--r--drivers/clk/meson/g12a-aoclk.c454
-rw-r--r--drivers/clk/meson/g12a-aoclk.h34
-rw-r--r--drivers/clk/meson/g12a.c2359
-rw-r--r--drivers/clk/meson/g12a.h175
-rw-r--r--drivers/clk/meson/gxbb-aoclk-32k.c193
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c268
-rw-r--r--drivers/clk/meson/gxbb-aoclk.h20
-rw-r--r--drivers/clk/meson/gxbb.c296
-rw-r--r--drivers/clk/meson/meson-aoclk.c54
-rw-r--r--drivers/clk/meson/meson-aoclk.h13
-rw-r--r--drivers/clk/meson/meson-eeclk.c63
-rw-r--r--drivers/clk/meson/meson-eeclk.h25
-rw-r--r--drivers/clk/meson/meson8b.c374
-rw-r--r--drivers/clk/meson/meson8b.h11
-rw-r--r--drivers/clk/meson/parm.h46
-rw-r--r--drivers/clk/meson/sclk-div.c10
-rw-r--r--drivers/clk/meson/sclk-div.h (renamed from drivers/clk/meson/clkc-audio.h)16
-rw-r--r--drivers/clk/meson/vid-pll-div.c10
-rw-r--r--drivers/clk/meson/vid-pll-div.h20
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c9
-rw-r--r--drivers/clk/mvebu/armada-370.c4
-rw-r--r--drivers/clk/mvebu/armada-xp.c4
-rw-r--r--drivers/clk/mvebu/dove.c8
-rw-r--r--drivers/clk/mvebu/kirkwood.c2
-rw-r--r--drivers/clk/mvebu/mv98dx3236.c4
-rw-r--r--drivers/clk/qcom/Kconfig1
-rw-r--r--drivers/clk/qcom/clk-rcg.h5
-rw-r--r--drivers/clk/qcom/clk-rcg2.c24
-rw-r--r--drivers/clk/qcom/clk-rpmh.c146
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c63
-rw-r--r--drivers/clk/qcom/common.c8
-rw-r--r--drivers/clk/qcom/common.h2
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c10
-rw-r--r--drivers/clk/qcom/gcc-mdm9615.c11
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c10
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c61
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c10
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c11
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c14
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c10
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c4
-rw-r--r--drivers/clk/renesas/r8a774c0-cpg-mssr.c15
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c147
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h4
-rw-r--r--drivers/clk/samsung/clk-exynos4.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c13
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c38
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c2
-rw-r--r--drivers/clk/socfpga/clk-gate.c22
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c1
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c2
-rw-r--r--drivers/clk/socfpga/clk-pll.c1
-rw-r--r--drivers/clk/socfpga/clk-s10.c20
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c2
-rw-r--r--drivers/clk/tegra/clk-dfll.c18
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c4
-rw-r--r--drivers/clk/ti/adpll.c2
-rw-r--r--drivers/clk/ti/apll.c4
-rw-r--r--drivers/clk/ti/autoidle.c101
-rw-r--r--drivers/clk/ti/clk.c80
-rw-r--r--drivers/clk/ti/clkctrl.c2
-rw-r--r--drivers/clk/ti/clock.h5
-rw-r--r--drivers/clk/ti/clockdomain.c2
-rw-r--r--drivers/clk/ti/divider.c13
-rw-r--r--drivers/clk/ti/dpll.c11
-rw-r--r--drivers/clk/ti/dpll3xxx.c2
-rw-r--r--drivers/clk/ti/gate.c2
-rw-r--r--drivers/clk/ti/interface.c4
-rw-r--r--drivers/clk/ti/mux.c2
-rw-r--r--drivers/clk/uniphier/clk-uniphier-cpugear.c2
-rw-r--r--drivers/clk/x86/clk-st.c3
-rw-r--r--drivers/clk/zynqmp/clkc.c4
-rw-r--r--drivers/clocksource/timer-ti-dm.c5
-rw-r--r--drivers/cpufreq/cpufreq.c12
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c8
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c4
-rw-r--r--drivers/cpuidle/poll_state.c2
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c6
-rw-r--r--drivers/crypto/bcm/cipher.c44
-rw-r--r--drivers/crypto/caam/caamalg.c2
-rw-r--r--drivers/crypto/caam/caamhash.c15
-rw-r--r--drivers/crypto/caam/desc.h1
-rw-r--r--drivers/crypto/caam/error.h9
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c4
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_main.c7
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c6
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c10
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c6
-rw-r--r--drivers/crypto/ccree/cc_aead.c40
-rw-r--r--drivers/crypto/ccree/cc_driver.c7
-rw-r--r--drivers/crypto/ccree/cc_pm.c13
-rw-r--r--drivers/crypto/ccree/cc_pm.h3
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c4
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c15
-rw-r--r--drivers/crypto/ixp4xx_crypto.c6
-rw-r--r--drivers/crypto/mediatek/mtk-platform.c16
-rw-r--r--drivers/crypto/qat/qat_common/adf_admin.c12
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c24
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c68
-rw-r--r--drivers/crypto/talitos.c26
-rw-r--r--drivers/dma/at_xdmac.c19
-rw-r--r--drivers/dma/bcm2835-dma.c70
-rw-r--r--drivers/dma/dmatest.c32
-rw-r--r--drivers/dma/imx-dma.c8
-rw-r--r--drivers/dma/imx-sdma.c8
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mxs-dma.c6
-rw-r--r--drivers/dma/xgene-dma.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c14
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c6
-rw-r--r--drivers/edac/altera_edac.h4
-rw-r--r--drivers/firewire/sbp2.c5
-rw-r--r--drivers/firmware/arm_scmi/bus.c9
-rw-r--r--drivers/firmware/efi/arm-runtime.c5
-rw-r--r--drivers/firmware/efi/efi.c4
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c3
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c7
-rw-r--r--drivers/fpga/stratix10-soc.c5
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c4
-rw-r--r--drivers/gpio/gpio-eic-sprd.c14
-rw-r--r--drivers/gpio/gpio-mt7621.c20
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c26
-rw-r--r--drivers/gpio/gpio-pxa.c1
-rw-r--r--drivers/gpio/gpio-vf610.c5
-rw-r--r--drivers/gpio/gpiolib-acpi.c7
-rw-r--r--drivers/gpio/gpiolib.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c49
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c23
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c34
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c48
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c3
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c133
-rw-r--r--drivers/gpu/drm/drm_lease.c3
-rw-r--r--drivers/gpu/drm/drm_mode_object.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c30
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c75
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c23
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c22
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h18
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c240
-rw-r--r--drivers/gpu/drm/i915/intel_display.c50
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h10
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c45
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c7
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c38
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c6
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h9
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c25
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c4
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c23
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c26
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c27
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c14
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c1
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c15
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h11
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c14
-rw-r--r--drivers/gpu/drm/vkms/vkms_crc.c3
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c7
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h2
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c6
-rw-r--r--drivers/gpu/vga/Kconfig1
-rw-r--r--drivers/hid/hid-core.c23
-rw-r--r--drivers/hid/hid-debug.c120
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hv/channel.c9
-rw-r--r--drivers/hv/hv_balloon.c10
-rw-r--r--drivers/hv/ring_buffer.c31
-rw-r--r--drivers/hv/vmbus_drv.c91
-rw-r--r--drivers/hwmon/lm80.c4
-rw-r--r--drivers/hwmon/nct6775.c15
-rw-r--r--drivers/hwmon/occ/common.c24
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c12
-rw-r--r--drivers/i2c/busses/i2c-cadence.c9
-rw-r--r--drivers/i2c/busses/i2c-omap.c13
-rw-r--r--drivers/i2c/busses/i2c-tegra.c15
-rw-r--r--drivers/i2c/i2c-dev.c6
-rw-r--r--drivers/i3c/master.c2
-rw-r--r--drivers/i3c/master/dw-i3c-master.c25
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c4
-rw-r--r--drivers/ide/ide-atapi.c9
-rw-r--r--drivers/ide/ide-io.c61
-rw-r--r--drivers/ide/ide-park.c2
-rw-r--r--drivers/ide/ide-probe.c23
-rw-r--r--drivers/ide/ide-proc.c2
-rw-r--r--drivers/iio/adc/axp288_adc.c76
-rw-r--r--drivers/iio/adc/ti-ads8688.c3
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c5
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c7
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/core_priv.h1
-rw-r--r--drivers/infiniband/core/device.c13
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/infiniband/core/rdma_core.h2
-rw-r--r--drivers/infiniband/core/umem_odp.c3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c11
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c62
-rw-r--r--drivers/infiniband/core/uverbs_main.c26
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c8
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c15
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/init.c29
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c27
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c3
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c16
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c14
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c1
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h35
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c6
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c10
-rw-r--r--drivers/input/joystick/xpad.c3
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/cap11xx.c35
-rw-r--r--drivers/input/keyboard/matrix_keypad.c2
-rw-r--r--drivers/input/keyboard/qt2160.c69
-rw-r--r--drivers/input/keyboard/st-keyscan.c4
-rw-r--r--drivers/input/misc/apanel.c24
-rw-r--r--drivers/input/misc/bma150.c9
-rw-r--r--drivers/input/misc/pwm-vibra.c19
-rw-r--r--drivers/input/misc/uinput.c5
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/mouse/elantech.c9
-rw-r--r--drivers/input/serio/olpc_apsp.c17
-rw-r--r--drivers/input/serio/ps2-gpio.c1
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c4
-rw-r--r--drivers/iommu/amd_iommu.c19
-rw-r--r--drivers/iommu/intel-iommu.c10
-rw-r--r--drivers/iommu/mtk_iommu_v1.c9
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c77
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c126
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c2
-rw-r--r--drivers/irqchip/irq-madera.c2
-rw-r--r--drivers/irqchip/irq-mmp.c6
-rw-r--r--drivers/irqchip/irq-stm32-exti.c1
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c40
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c6
-rw-r--r--drivers/isdn/hardware/avm/b1.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c3
-rw-r--r--drivers/isdn/i4l/isdn_tty.c6
-rw-r--r--drivers/isdn/mISDN/timerdev.c2
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c4
-rw-r--r--drivers/mailbox/mailbox.c1
-rw-r--r--drivers/md/dm-crypt.c27
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/dm-thin-metadata.h2
-rw-r--r--drivers/md/dm-thin.c65
-rw-r--r--drivers/md/dm.c41
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/md/raid1.c28
-rw-r--r--drivers/md/raid5-cache.c33
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c2
-rw-r--r--drivers/media/platform/vim2m.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c24
-rw-r--r--drivers/mfd/Kconfig3
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/mfd/axp20x.c126
-rw-r--r--drivers/mfd/bd9571mwv.c1
-rw-r--r--drivers/mfd/cros_ec_dev.c1
-rw-r--r--drivers/mfd/db8500-prcmu.c4
-rw-r--r--drivers/mfd/exynos-lpass.c4
-rw-r--r--drivers/mfd/madera-core.c5
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/mc13xxx-core.c4
-rw-r--r--drivers/mfd/mt6397-core.c3
-rw-r--r--drivers/mfd/qcom_rpm.c4
-rw-r--r--drivers/mfd/rave-sp.c2
-rw-r--r--drivers/mfd/stmpe.c12
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c5
-rw-r--r--drivers/mfd/tps65218.c24
-rw-r--r--drivers/mfd/tps6586x.c24
-rw-r--r--drivers/mfd/twl-core.c4
-rw-r--r--drivers/mfd/wm5110-tables.c2
-rw-r--r--drivers/misc/genwqe/card_utils.c4
-rw-r--r--drivers/misc/ibmvmc.c7
-rw-r--r--drivers/misc/mei/client.c5
-rw-r--r--drivers/misc/mei/hbm.c12
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/pci-me.c6
-rw-r--r--drivers/misc/mic/vop/vop_main.c82
-rw-r--r--drivers/misc/pvpanic.c4
-rw-r--r--drivers/mmc/core/block.c10
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/host/Kconfig4
-rw-r--r--drivers/mmc/host/bcm2835.c2
-rw-r--r--drivers/mmc/host/dw_mmc-bluefield.c5
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c33
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/mmc/host/sdhci-iproc.c5
-rw-r--r--drivers/mmc/host/sdhci.c5
-rw-r--r--drivers/mmc/host/sunxi-mmc.c26
-rw-r--r--drivers/mtd/devices/powernv_flash.c2
-rw-r--r--drivers/mtd/mtdcore.c3
-rw-r--r--drivers/mtd/mtdcore.h2
-rw-r--r--drivers/mtd/mtdpart.c39
-rw-r--r--drivers/mtd/nand/raw/denali.c2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c21
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c13
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c1
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c20
-rw-r--r--drivers/mtd/nand/spi/core.c46
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/bonding/bond_main.c38
-rw-r--r--drivers/net/caif/caif_serial.c5
-rw-r--r--drivers/net/can/dev.c27
-rw-r--r--drivers/net/can/flexcan.c4
-rw-r--r--drivers/net/dsa/b53/b53_common.c90
-rw-r--r--drivers/net/dsa/b53/b53_priv.h3
-rw-r--r--drivers/net/dsa/b53/b53_srab.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c12
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c2
-rw-r--r--drivers/net/dsa/mt7530.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c141
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h5
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c21
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h10
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c2
-rw-r--r--drivers/net/dsa/realtek-smi.c18
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c12
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c12
-rw-r--r--drivers/net/ethernet/alteon/acenic.c2
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c3
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c61
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c22
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c8
-rw-r--r--drivers/net/ethernet/apple/bmac.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c7
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c4
-rw-r--r--drivers/net/ethernet/broadcom/b44.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c33
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h5
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c22
-rw-r--r--drivers/net/ethernet/cadence/macb.h3
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c36
-rw-r--r--drivers/net/ethernet/cavium/Kconfig1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c149
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c128
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c68
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c18
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c14
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c8
-rw-r--r--drivers/net/ethernet/i825xx/82596.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/intel/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c31
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c14
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c15
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c18
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c35
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c11
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c12
-rw-r--r--drivers/net/ethernet/ni/nixge.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c13
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c28
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c30
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c12
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c12
-rw-r--r--drivers/net/ethernet/sfc/ef10.c29
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c4
-rw-r--r--drivers/net/ethernet/sfc/nic.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c4
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c2
-rw-r--r--drivers/net/ethernet/sun/cassini.c17
-rw-r--r--drivers/net/ethernet/sun/cassini.h15
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c8
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c12
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c12
-rw-r--r--drivers/net/fddi/defxx.c8
-rw-r--r--drivers/net/fddi/skfp/skfddi.c8
-rw-r--r--drivers/net/geneve.c10
-rw-r--r--drivers/net/hyperv/hyperv_net.h12
-rw-r--r--drivers/net/hyperv/netvsc.c4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c145
-rw-r--r--drivers/net/hyperv/rndis_filter.c36
-rw-r--r--drivers/net/ieee802154/mcr20a.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c10
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/asix.c8
-rw-r--r--drivers/net/phy/bcm87xx.c2
-rw-r--r--drivers/net/phy/cortina.c1
-rw-r--r--drivers/net/phy/dp83640.c13
-rw-r--r--drivers/net/phy/marvell.c53
-rw-r--r--drivers/net/phy/marvell10g.c6
-rw-r--r--drivers/net/phy/mdio-hisi-femac.c16
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/phy/meson-gxl.c1
-rw-r--r--drivers/net/phy/micrel.c2
-rw-r--r--drivers/net/phy/phy.c33
-rw-r--r--drivers/net/phy/phy_device.c17
-rw-r--r--drivers/net/phy/phylink.c19
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/phy/rockchip.c9
-rw-r--r--drivers/net/phy/sfp-bus.c2
-rw-r--r--drivers/net/phy/sfp.c30
-rw-r--r--drivers/net/phy/sfp.h2
-rw-r--r--drivers/net/phy/teranetics.c1
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c5
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/team/team.c31
-rw-r--r--drivers/net/tun.c14
-rw-r--r--drivers/net/usb/aqc111.c15
-rw-r--r--drivers/net/usb/asix_devices.c9
-rw-r--r--drivers/net/usb/cdc_ether.c34
-rw-r--r--drivers/net/usb/qmi_wwan.c5
-rw-r--r--drivers/net/usb/r8152.c5
-rw-r--r--drivers/net/virtio_net.c181
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c8
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/vxlan.c14
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c71
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c8
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c39
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c46
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c15
-rw-r--r--drivers/net/wireless/virt_wifi.c4
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c8
-rw-r--r--drivers/nvdimm/dimm.c6
-rw-r--r--drivers/nvdimm/dimm_devs.c22
-rw-r--r--drivers/nvdimm/nd-core.h4
-rw-r--r--drivers/nvdimm/nd.h1
-rw-r--r--drivers/nvme/host/core.c27
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/multipath.c5
-rw-r--r--drivers/nvme/host/nvme.h6
-rw-r--r--drivers/nvme/host/pci.c126
-rw-r--r--drivers/nvme/host/rdma.c64
-rw-r--r--drivers/nvme/host/tcp.c35
-rw-r--r--drivers/nvme/target/rdma.c15
-rw-r--r--drivers/nvme/target/tcp.c2
-rw-r--r--drivers/of/dynamic.c3
-rw-r--r--drivers/of/fdt.c4
-rw-r--r--drivers/of/overlay.c3
-rw-r--r--drivers/of/pdt.c1
-rw-r--r--drivers/of/property.c1
-rw-r--r--drivers/opp/core.c63
-rw-r--r--drivers/pci/Kconfig22
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c11
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c16
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c6
-rw-r--r--drivers/pci/msi.c22
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/pci/quirks.c5
-rw-r--r--drivers/pci/switch/switchtec.c8
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c5
-rw-r--r--drivers/phy/qualcomm/phy-ath79-usb.c4
-rw-r--r--drivers/phy/ti/Kconfig1
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c8
-rw-r--r--drivers/pinctrl/mediatek/Kconfig3
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c7
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c44
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h2
-rw-r--r--drivers/platform/x86/Kconfig8
-rw-r--r--drivers/ptp/ptp_chardev.c3
-rw-r--r--drivers/rapidio/devices/tsi721.c22
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c8
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c9
-rw-r--r--drivers/reset/Kconfig20
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/core.c42
-rw-r--r--drivers/reset/reset-hsdk.c1
-rw-r--r--drivers/reset/reset-simple.c13
-rw-r--r--drivers/reset/reset-socfpga.c88
-rw-r--r--drivers/reset/reset-uniphier-glue.c (renamed from drivers/reset/reset-uniphier-usb3.c)50
-rw-r--r--drivers/s390/block/dasd_eckd.c8
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c3
-rw-r--r--drivers/s390/net/ism_drv.c15
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c31
-rw-r--r--drivers/s390/net/qeth_l2_main.c8
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c12
-rw-r--r--drivers/scsi/3w-sas.c5
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/a100u2w.c8
-rw-r--r--drivers/scsi/aacraid/linit.c9
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c8
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c18
-rw-r--r--drivers/scsi/be2iscsi/be_main.c4
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c11
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c49
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c44
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c8
-rw-r--r--drivers/scsi/csiostor/csio_attr.c2
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c9
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c28
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c7
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h5
-rw-r--r--drivers/scsi/cxlflash/main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c12
-rw-r--r--drivers/scsi/isci/init.c14
-rw-r--r--drivers/scsi/libfc/fc_lport.c6
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/libiscsi.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c35
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c15
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c8
-rw-r--r--drivers/scsi/mesh.c5
-rw-r--r--drivers/scsi/mvumi.c9
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c6
-rw-r--r--drivers/scsi/qedf/qedf_main.c29
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c3
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c39
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c12
-rw-r--r--drivers/scsi/scsi_debug.c41
-rw-r--r--drivers/scsi/scsi_lib.c5
-rw-r--r--drivers/scsi/scsi_pm.c26
-rw-r--r--drivers/scsi/sd.c18
-rw-r--r--drivers/scsi/sd_zbc.c20
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c34
-rw-r--r--drivers/scsi/ufs/ufs.h2
-rw-r--r--drivers/scsi/ufs/ufshcd.c12
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.c2
-rw-r--r--drivers/soc/fsl/qbman/qman.c9
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c55
-rw-r--r--drivers/soc/renesas/Kconfig2
-rw-r--r--drivers/soc/renesas/r8a774c0-sysc.c23
-rw-r--r--drivers/spi/spi-pic32-sqi.c6
-rw-r--r--drivers/staging/android/ion/ion.c2
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c2
-rw-r--r--drivers/staging/speakup/spk_ttyio.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c1
-rw-r--r--drivers/staging/vt6655/device_main.c19
-rw-r--r--drivers/staging/wilc1000/host_interface.c5
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c7
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/target_core_configfs.c8
-rw-r--r--drivers/target/target_core_user.c89
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c30
-rw-r--r--drivers/thermal/of-thermal.c4
-rw-r--r--drivers/tty/n_hdlc.c1
-rw-r--r--drivers/tty/serial/8250/8250_core.c17
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c3
-rw-r--r--drivers/tty/serial/8250/8250_pci.c9
-rw-r--r--drivers/tty/serial/Kconfig12
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/earlycon-riscv-sbi.c31
-rw-r--r--drivers/tty/serial/fsl_lpuart.c2
-rw-r--r--drivers/tty/serial/lantiq.c36
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c4
-rw-r--r--drivers/tty/serial/serial_core.c18
-rw-r--r--drivers/tty/serial/sh-sci.c9
-rw-r--r--drivers/tty/tty_io.c23
-rw-r--r--drivers/tty/vt/vt.c50
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c3
-rw-r--r--drivers/usb/class/cdc-acm.c7
-rw-r--r--drivers/usb/core/generic.c9
-rw-r--r--drivers/usb/core/ledtrig-usbport.c17
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc2/gadget.c2
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c13
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/host/ehci-mv.c1
-rw-r--r--drivers/usb/host/uhci-hcd.c6
-rw-r--r--drivers/usb/host/xhci-mem.c8
-rw-r--r--drivers/usb/musb/musb_gadget.c13
-rw-r--r--drivers/usb/musb/musbhsdma.c21
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/usb/phy/phy-am335x.c5
-rw-r--r--drivers/usb/serial/ftdi_sio.c15
-rw-r--r--drivers/usb/serial/keyspan_usa26msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa28msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa49msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa67msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa90msg.h1
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h2
-rw-r--r--drivers/usb/serial/usb-serial-simple.c3
-rw-r--r--drivers/usb/storage/scsiglue.c8
-rw-r--r--drivers/usb/storage/unusual_devs.h12
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c3
-rw-r--r--drivers/usb/usbip/README7
-rw-r--r--drivers/vfio/pci/trace.h8
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c36
-rw-r--r--drivers/vfio/vfio_iommu_type1.c2
-rw-r--r--drivers/vhost/net.c6
-rw-r--r--drivers/vhost/scsi.c22
-rw-r--r--drivers/vhost/vhost.c112
-rw-r--r--drivers/vhost/vhost.h7
-rw-r--r--drivers/vhost/vsock.c4
-rw-r--r--drivers/video/backlight/88pm860x_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c28
-rw-r--r--drivers/video/console/vgacon.c7
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbmem.c19
-rw-r--r--drivers/video/fbdev/da8xx-fb.c6
-rw-r--r--drivers/video/fbdev/offb.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c2
-rw-r--r--drivers/video/logo/Kconfig9
-rw-r--r--drivers/virtio/virtio_balloon.c98
-rw-r--r--drivers/virtio/virtio_mmio.c9
-rw-r--r--drivers/virtio/virtio_pci_common.c8
-rw-r--r--drivers/virtio/virtio_ring.c15
-rw-r--r--drivers/watchdog/mt7621_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/tqmx86_wdt.c8
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/pvcalls-back.c9
-rw-r--r--drivers/xen/pvcalls-front.c104
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--fs/afs/flock.c4
-rw-r--r--fs/afs/inode.c3
-rw-r--r--fs/afs/protocol_yfs.h11
-rw-r--r--fs/afs/rxrpc.c53
-rw-r--r--fs/afs/server_list.c4
-rw-r--r--fs/afs/yfsclient.c2
-rw-r--r--fs/aio.c1
-rw-r--r--fs/autofs/expire.c3
-rw-r--r--fs/autofs/inode.c4
-rw-r--r--fs/binfmt_script.c57
-rw-r--r--fs/block_dev.c28
-rw-r--r--fs/btrfs/ctree.c76
-rw-r--r--fs/btrfs/ctree.h7
-rw-r--r--fs/btrfs/disk-io.c12
-rw-r--r--fs/btrfs/extent-tree.c21
-rw-r--r--fs/btrfs/inode.c5
-rw-r--r--fs/btrfs/ioctl.c49
-rw-r--r--fs/btrfs/super.c3
-rw-r--r--fs/btrfs/transaction.c24
-rw-r--r--fs/btrfs/volumes.c16
-rw-r--r--fs/buffer.c19
-rw-r--r--fs/ceph/addr.c5
-rw-r--r--fs/ceph/caps.c2
-rw-r--r--fs/ceph/quota.c13
-rw-r--r--fs/ceph/snap.c3
-rw-r--r--fs/ceph/super.c4
-rw-r--r--fs/cifs/cifs_debug.c1
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h20
-rw-r--r--fs/cifs/cifssmb.c65
-rw-r--r--fs/cifs/connect.c28
-rw-r--r--fs/cifs/dfs_cache.c1
-rw-r--r--fs/cifs/file.c56
-rw-r--r--fs/cifs/inode.c10
-rw-r--r--fs/cifs/smb2file.c8
-rw-r--r--fs/cifs/smb2inode.c17
-rw-r--r--fs/cifs/smb2misc.c7
-rw-r--r--fs/cifs/smb2ops.c72
-rw-r--r--fs/cifs/smb2pdu.c108
-rw-r--r--fs/cifs/smb2pdu.h19
-rw-r--r--fs/cifs/trace.c10
-rw-r--r--fs/cifs/trace.h10
-rw-r--r--fs/cifs/transport.c113
-rw-r--r--fs/dcache.c38
-rw-r--r--fs/debugfs/inode.c36
-rw-r--r--fs/direct-io.c5
-rw-r--r--fs/drop_caches.c8
-rw-r--r--fs/ext4/fsync.c13
-rw-r--r--fs/fs-writeback.c40
-rw-r--r--fs/fuse/dev.c4
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/glops.c1
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/lops.c190
-rw-r--r--fs/gfs2/lops.h4
-rw-r--r--fs/gfs2/ops_fstype.c1
-rw-r--r--fs/gfs2/recovery.c123
-rw-r--r--fs/gfs2/recovery.h2
-rw-r--r--fs/gfs2/rgrp.c2
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/hugetlbfs/inode.c61
-rw-r--r--fs/inode.c7
-rw-r--r--fs/iomap.c37
-rw-r--r--fs/nfs/nfs4file.c8
-rw-r--r--fs/nfs/nfs4idmap.c31
-rw-r--r--fs/nfs/super.c5
-rw-r--r--fs/nfs/write.c20
-rw-r--r--fs/nfsd/nfsctl.c4
-rw-r--r--fs/nfsd/vfs.c6
-rw-r--r--fs/notify/inotify/inotify_user.c6
-rw-r--r--fs/proc/base.c4
-rw-r--r--fs/proc/generic.c4
-rw-r--r--fs/proc/internal.h1
-rw-r--r--fs/proc/proc_net.c20
-rw-r--r--fs/proc/task_mmu.c22
-rw-r--r--fs/pstore/ram.c12
-rw-r--r--fs/sysfs/dir.c3
-rw-r--r--fs/sysfs/file.c6
-rw-r--r--fs/sysfs/group.c3
-rw-r--r--fs/sysfs/symlink.c3
-rw-r--r--fs/xfs/scrub/repair.c11
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--fs/xfs/xfs_buf.c19
-rw-r--r--include/asm-generic/shmparam.h (renamed from include/uapi/asm-generic/shmparam.h)0
-rw-r--r--include/drm/drm_dp_helper.h7
-rw-r--r--include/drm/drm_dp_mst_helper.h3
-rw-r--r--include/dt-bindings/clock/actions,s500-cmu.h78
-rw-r--r--include/dt-bindings/clock/axg-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/exynos5433.h8
-rw-r--r--include/dt-bindings/clock/g12a-aoclkc.h34
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h135
-rw-r--r--include/dt-bindings/clock/gxbb-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/imx5-clock.h3
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h244
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h35
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h2
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h1
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h10
-rw-r--r--include/dt-bindings/clock/qcom,rpmh.h1
-rw-r--r--include/dt-bindings/clock/r8a774a1-cpg-mssr.h1
-rw-r--r--include/dt-bindings/clock/r8a774c0-cpg-mssr.h1
-rw-r--r--include/dt-bindings/clock/stm32mp1-clks.h3
-rw-r--r--include/dt-bindings/reset/amlogic,meson-axg-reset.h3
-rw-r--r--include/dt-bindings/reset/g12a-aoclkc.h18
-rw-r--r--include/keys/request_key_auth-type.h36
-rw-r--r--include/keys/user-type.h2
-rw-r--r--include/kvm/arm_vgic.h6
-rw-r--r--include/linux/backing-dev-defs.h1
-rw-r--r--include/linux/bcma/bcma_soc.h1
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blktrace_api.h8
-rw-r--r--include/linux/bpf_verifier.h1
-rw-r--r--include/linux/bpfilter.h15
-rw-r--r--include/linux/ceph/libceph.h6
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/clk-provider.h3
-rw-r--r--include/linux/clk.h36
-rw-r--r--include/linux/clk/ti.h1
-rw-r--r--include/linux/clkdev.h4
-rw-r--r--include/linux/compiler-clang.h5
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/compiler-intel.h4
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/compiler_attributes.h14
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/dcache.h7
-rw-r--r--include/linux/dma-mapping.h9
-rw-r--r--include/linux/efi.h7
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/filter.h21
-rw-r--r--include/linux/fs.h9
-rw-r--r--include/linux/hid-debug.h9
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/hyperv.h5
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/if_arp.h1
-rw-r--r--include/linux/interrupt.h1
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/key-type.h22
-rw-r--r--include/linux/libnvdimm.h2
-rw-r--r--include/linux/memblock.h3
-rw-r--r--include/linux/memory_hotplug.h18
-rw-r--r--include/linux/mfd/cros_ec_commands.h94
-rw-r--r--include/linux/mfd/ingenic-tcu.h2
-rw-r--r--include/linux/mfd/madera/core.h7
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h4
-rw-r--r--include/linux/mfd/tmio.h2
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmzone.h6
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/netdev_features.h24
-rw-r--r--include/linux/netdevice.h8
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/pci-dma-compat.h2
-rw-r--r--include/linux/perf_event.h5
-rw-r--r--include/linux/phy.h29
-rw-r--r--include/linux/phy/phy.h1
-rw-r--r--include/linux/pm_opp.h5
-rw-r--r--include/linux/pm_runtime.h2
-rw-r--r--include/linux/qcom_scm.h1
-rw-r--r--include/linux/qed/qed_chain.h31
-rw-r--r--include/linux/reset.h15
-rw-r--r--include/linux/sched.h11
-rw-r--r--include/linux/sched/coredump.h1
-rw-r--r--include/linux/sched/wake_q.h6
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/skbuff.h9
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/umh.h2
-rw-r--r--include/linux/virtio_config.h13
-rw-r--r--include/linux/virtio_net.h19
-rw-r--r--include/linux/xarray.h227
-rw-r--r--include/net/af_rxrpc.h16
-rw-r--r--include/net/ax25.h12
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/l3mdev.h3
-rw-r--r--include/net/netfilter/nf_flow_table.h1
-rw-r--r--include/net/netfilter/nf_tables.h17
-rw-r--r--include/net/phonet/pep.h5
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/tls.h2
-rw-r--r--include/net/xfrm.h12
-rw-r--r--include/rdma/ib_verbs.h24
-rw-r--r--include/sound/compress_driver.h6
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/soc.h6
-rw-r--r--include/trace/events/afs.h2
-rw-r--r--include/uapi/linux/android/binderfs.h (renamed from include/uapi/linux/android/binder_ctl.h)10
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/blkzoned.h1
-rw-r--r--include/uapi/linux/in.h2
-rw-r--r--include/uapi/linux/inet_diag.h16
-rw-r--r--include/uapi/linux/input.h6
-rw-r--r--include/uapi/linux/ptp_clock.h2
-rw-r--r--include/uapi/linux/virtio_config.h6
-rw-r--r--include/uapi/linux/virtio_ring.h10
-rw-r--r--include/uapi/rdma/hns-abi.h5
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h1
-rw-r--r--include/xen/arm/page-coherent.h97
-rw-r--r--init/Kconfig14
-rw-r--r--init/initramfs.c6
-rw-r--r--init/main.c3
-rw-r--r--kernel/bpf/btf.c17
-rw-r--r--kernel/bpf/cgroup.c3
-rw-r--r--kernel/bpf/hashtab.c4
-rw-r--r--kernel/bpf/lpm_trie.c1
-rw-r--r--kernel/bpf/map_in_map.c17
-rw-r--r--kernel/bpf/percpu_freelist.c41
-rw-r--r--kernel/bpf/percpu_freelist.h4
-rw-r--r--kernel/bpf/stackmap.c20
-rw-r--r--kernel/bpf/syscall.c12
-rw-r--r--kernel/bpf/verifier.c72
-rw-r--r--kernel/cpu.c38
-rw-r--r--kernel/dma/swiotlb.c2
-rw-r--r--kernel/events/core.c30
-rw-r--r--kernel/events/ring_buffer.c3
-rw-r--r--kernel/exit.c15
-rw-r--r--kernel/fork.c14
-rw-r--r--kernel/futex.c45
-rw-r--r--kernel/irq/irqdesc.c2
-rw-r--r--kernel/irq/manage.c3
-rw-r--r--kernel/locking/rtmutex.c37
-rw-r--r--kernel/locking/rwsem-xadd.c11
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/sched/core.c19
-rw-r--r--kernel/sched/fair.c1
-rw-r--r--kernel/sched/psi.c23
-rw-r--r--kernel/seccomp.c4
-rw-r--r--kernel/signal.c66
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/time/posix-cpu-timers.c1
-rw-r--r--kernel/trace/bpf_trace.c14
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_kprobe.c22
-rw-r--r--kernel/trace/trace_probe_tmpl.h6
-rw-r--r--kernel/trace/trace_uprobe.c9
-rw-r--r--kernel/umh.c33
-rw-r--r--kernel/workqueue.c23
-rw-r--r--kernel/workqueue_internal.h6
-rw-r--r--lib/assoc_array.c8
-rw-r--r--lib/crc32.c4
-rw-r--r--lib/int_sqrt.c2
-rw-r--r--lib/sbitmap.c13
-rw-r--r--lib/test_kmod.c2
-rw-r--r--lib/test_rhashtable.c23
-rw-r--r--lib/test_xarray.c57
-rw-r--r--lib/xarray.c92
-rw-r--r--mm/backing-dev.c1
-rw-r--r--mm/debug.c4
-rw-r--r--mm/gup.c3
-rw-r--r--mm/hugetlb.c84
-rw-r--r--mm/kasan/Makefile3
-rw-r--r--mm/kasan/common.c82
-rw-r--r--mm/kasan/tags.c2
-rw-r--r--mm/kmemleak.c10
-rw-r--r--mm/memblock.c11
-rw-r--r--mm/memory-failure.c19
-rw-r--r--mm/memory.c26
-rw-r--r--mm/memory_hotplug.c85
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/migrate.c25
-rw-r--r--mm/mincore.c94
-rw-r--r--mm/oom_kill.c12
-rw-r--r--mm/page_alloc.c40
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/rmap.c8
-rw-r--r--mm/shmem.c10
-rw-r--r--mm/slab.c21
-rw-r--r--mm/slab.h7
-rw-r--r--mm/slab_common.c3
-rw-r--r--mm/slub.c61
-rw-r--r--mm/swap.c17
-rw-r--r--mm/usercopy.c9
-rw-r--r--mm/userfaultfd.c11
-rw-r--r--mm/util.c4
-rw-r--r--mm/vmscan.c10
-rw-r--r--net/ax25/ax25_ip.c4
-rw-r--r--net/ax25/ax25_route.c19
-rw-r--r--net/batman-adv/bat_v_elp.c3
-rw-r--r--net/batman-adv/hard-interface.c5
-rw-r--r--net/batman-adv/soft-interface.c4
-rw-r--r--net/bpf/test_run.c45
-rw-r--r--net/bpfilter/bpfilter_kern.c76
-rw-r--r--net/bpfilter/bpfilter_umh_blob.S2
-rw-r--r--net/bridge/br_fdb.c5
-rw-r--r--net/bridge/br_forward.c10
-rw-r--r--net/bridge/br_multicast.c9
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_netfilter_ipv6.c1
-rw-r--r--net/bridge/br_private.h1
-rw-r--r--net/bridge/br_vlan.c26
-rw-r--r--net/bridge/netfilter/ebtables.c15
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c1
-rw-r--r--net/can/bcm.c27
-rw-r--r--net/can/gw.c30
-rw-r--r--net/ceph/ceph_common.c11
-rw-r--r--net/ceph/debugfs.c2
-rw-r--r--net/ceph/messenger.c20
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/compat.c6
-rw-r--r--net/core/dev.c7
-rw-r--r--net/core/filter.c48
-rw-r--r--net/core/lwt_bpf.c1
-rw-r--r--net/core/neighbour.c15
-rw-r--r--net/core/skbuff.c11
-rw-r--r--net/core/skmsg.c3
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dccp/ccid.h4
-rw-r--r--net/decnet/dn_dev.c2
-rw-r--r--net/dsa/master.c4
-rw-r--r--net/dsa/port.c7
-rw-r--r--net/dsa/slave.c17
-rw-r--r--net/ipv4/bpfilter/sockopt.c58
-rw-r--r--net/ipv4/devinet.c2
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/fib_trie.c15
-rw-r--r--net/ipv4/fou.c12
-rw-r--r--net/ipv4/gre_demux.c17
-rw-r--r--net/ipv4/inet_diag.c10
-rw-r--r--net/ipv4/inetpeer.c1
-rw-r--r--net/ipv4/ip_gre.c66
-rw-r--r--net/ipv4/ip_input.c1
-rw-r--r--net/ipv4/ip_sockglue.c12
-rw-r--r--net/ipv4/ip_tunnel.c8
-rw-r--r--net/ipv4/ip_vti.c50
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic_main.c7
-rw-r--r--net/ipv4/route.c7
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_ipv4.c5
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/tcp_timer.c2
-rw-r--r--net/ipv4/udp.c24
-rw-r--r--net/ipv4/udp_impl.h1
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv6/addrconf.c11
-rw-r--r--net/ipv6/af_inet6.c14
-rw-r--r--net/ipv6/datagram.c11
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/fou6.c19
-rw-r--r--net/ipv6/icmp.c8
-rw-r--r--net/ipv6/ip6_gre.c92
-rw-r--r--net/ipv6/ip6mr.c7
-rw-r--r--net/ipv6/netfilter.c4
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c1
-rw-r--r--net/ipv6/route.c46
-rw-r--r--net/ipv6/seg6.c4
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/ipv6/sit.c3
-rw-r--r--net/ipv6/udp.c38
-rw-r--r--net/ipv6/udp_impl.h1
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/key/af_key.c42
-rw-r--r--net/l2tp/l2tp_core.c9
-rw-r--r--net/l2tp/l2tp_core.h20
-rw-r--r--net/l2tp/l2tp_ip.c3
-rw-r--r--net/l2tp/l2tp_ip6.c3
-rw-r--r--net/mac80211/agg-tx.c4
-rw-r--r--net/mac80211/cfg.c10
-rw-r--r--net/mac80211/main.c4
-rw-r--r--net/mac80211/mesh.h6
-rw-r--r--net/mac80211/mesh_pathtbl.c157
-rw-r--r--net/mac80211/rx.c13
-rw-r--r--net/mac80211/tx.c12
-rw-r--r--net/mac80211/util.c6
-rw-r--r--net/netfilter/ipvs/Kconfig1
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c23
-rw-r--r--net/netfilter/nf_conntrack_core.c16
-rw-r--r--net/netfilter/nf_flow_table_core.c5
-rw-r--r--net/netfilter/nf_tables_api.c102
-rw-r--r--net/netfilter/nfnetlink_osf.c4
-rw-r--r--net/netfilter/nft_compat.c180
-rw-r--r--net/netfilter/nft_dynset.c18
-rw-r--r--net/netfilter/nft_flow_offload.c13
-rw-r--r--net/netfilter/nft_immediate.c6
-rw-r--r--net/netfilter/nft_lookup.c18
-rw-r--r--net/netfilter/nft_objref.c18
-rw-r--r--net/netfilter/x_tables.c2
-rw-r--r--net/netrom/nr_timer.c20
-rw-r--r--net/openvswitch/flow.c8
-rw-r--r--net/openvswitch/flow_netlink.c2
-rw-r--r--net/packet/af_packet.c9
-rw-r--r--net/phonet/pep.c32
-rw-r--r--net/rds/bind.c6
-rw-r--r--net/rds/ib_send.c4
-rw-r--r--net/rds/message.c4
-rw-r--r--net/rds/rds.h4
-rw-r--r--net/rds/send.c2
-rw-r--r--net/rose/rose_route.c5
-rw-r--r--net/rxrpc/af_rxrpc.c70
-rw-r--r--net/rxrpc/ar-internal.h19
-rw-r--r--net/rxrpc/call_object.c97
-rw-r--r--net/rxrpc/conn_client.c5
-rw-r--r--net/rxrpc/recvmsg.c3
-rw-r--r--net/rxrpc/sendmsg.c24
-rw-r--r--net/sched/act_tunnel_key.c19
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sched/cls_flower.c25
-rw-r--r--net/sched/cls_tcindex.c80
-rw-r--r--net/sched/sch_cake.c5
-rw-r--r--net/sched/sch_cbs.c3
-rw-r--r--net/sched/sch_drr.c7
-rw-r--r--net/sched/sch_dsmark.c3
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_hfsc.c9
-rw-r--r--net/sched/sch_htb.c3
-rw-r--r--net/sched/sch_prio.c3
-rw-r--r--net/sched/sch_qfq.c20
-rw-r--r--net/sched/sch_tbf.c3
-rw-r--r--net/sctp/diag.c1
-rw-r--r--net/sctp/ipv6.c8
-rw-r--r--net/sctp/offload.c1
-rw-r--r--net/sctp/protocol.c7
-rw-r--r--net/sctp/sm_make_chunk.c11
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/sctp/stream.c82
-rw-r--r--net/sctp/transport.c3
-rw-r--r--net/smc/af_smc.c15
-rw-r--r--net/smc/smc.h6
-rw-r--r--net/smc/smc_cdc.c23
-rw-r--r--net/smc/smc_cdc.h53
-rw-r--r--net/smc/smc_clc.c2
-rw-r--r--net/smc/smc_close.c9
-rw-r--r--net/smc/smc_core.c6
-rw-r--r--net/smc/smc_core.h20
-rw-r--r--net/smc/smc_ib.c6
-rw-r--r--net/smc/smc_llc.c3
-rw-r--r--net/smc/smc_pnet.c2
-rw-r--r--net/smc/smc_tx.c64
-rw-r--r--net/smc/smc_wr.c46
-rw-r--r--net/smc/smc_wr.h1
-rw-r--r--net/socket.c82
-rw-r--r--net/sunrpc/auth.c3
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c12
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c49
-rw-r--r--net/sunrpc/clnt.c20
-rw-r--r--net/sunrpc/debugfs.c2
-rw-r--r--net/sunrpc/xprt.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c105
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c9
-rw-r--r--net/sunrpc/xprtrdma/verbs.c13
-rw-r--r--net/sunrpc/xprtsock.c22
-rw-r--r--net/tipc/link.c17
-rw-r--r--net/tipc/msg.h22
-rw-r--r--net/tipc/netlink_compat.c54
-rw-r--r--net/tipc/node.c11
-rw-r--r--net/tipc/socket.c11
-rw-r--r--net/tipc/topsrv.c2
-rw-r--r--net/tls/tls_sw.c6
-rw-r--r--net/unix/af_unix.c57
-rw-r--r--net/unix/diag.c3
-rw-r--r--net/vmw_vsock/virtio_transport.c29
-rw-r--r--net/vmw_vsock/vmci_transport.c4
-rw-r--r--net/wireless/ap.c2
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/pmsr.c26
-rw-r--r--net/wireless/reg.c13
-rw-r--r--net/wireless/sme.c2
-rw-r--r--net/wireless/util.c35
-rw-r--r--net/x25/af_x25.c19
-rw-r--r--net/xdp/xdp_umem.c27
-rw-r--r--net/xdp/xsk.c20
-rw-r--r--net/xfrm/xfrm_interface.c4
-rw-r--r--net/xfrm/xfrm_policy.c67
-rw-r--r--net/xfrm/xfrm_state.c30
-rw-r--r--net/xfrm/xfrm_user.c15
-rw-r--r--samples/bpf/Makefile1
-rw-r--r--samples/bpf/asm_goto_workaround.h16
-rw-r--r--samples/bpf/test_cgrp2_attach2.c14
-rw-r--r--samples/bpf/test_current_task_under_cgroup_user.c2
-rw-r--r--samples/bpf/xdp1_user.c2
-rw-r--r--samples/mei/mei-amt-version.c2
-rw-r--r--samples/seccomp/Makefile1
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/coccinelle/api/alloc/alloc_cast.cocci8
-rw-r--r--scripts/coccinelle/api/alloc/zalloc-simple.cocci11
-rw-r--r--scripts/gcc-plugins/arm_ssp_per_task_plugin.c23
-rw-r--r--scripts/kallsyms.c4
-rw-r--r--scripts/kconfig/Makefile2
-rw-r--r--scripts/mod/modpost.c2
-rw-r--r--security/apparmor/domain.c5
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/keys/internal.h13
-rw-r--r--security/keys/key.c5
-rw-r--r--security/keys/keyctl.c1
-rw-r--r--security/keys/keyring.c4
-rw-r--r--security/keys/proc.c3
-rw-r--r--security/keys/process_keys.c1
-rw-r--r--security/keys/request_key.c73
-rw-r--r--security/keys/request_key_auth.c18
-rw-r--r--security/lsm_audit.c10
-rw-r--r--security/security.c7
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/yama/yama_lsm.c4
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c4
-rw-r--r--sound/core/compress_offload.c3
-rw-r--r--sound/core/pcm_lib.c13
-rw-r--r--sound/pci/cs46xx/dsp_spos.c3
-rw-r--r--sound/pci/hda/hda_bind.c3
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c4
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_realtek.c200
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c6
-rw-r--r--sound/soc/codecs/hdac_hdmi.c116
-rw-r--r--sound/soc/codecs/hdmi-codec.c4
-rw-r--r--sound/soc/codecs/pcm512x.c11
-rw-r--r--sound/soc/codecs/rt274.c5
-rw-r--r--sound/soc/codecs/rt5514-spi.c2
-rw-r--r--sound/soc/codecs/rt5682.c3
-rw-r--r--sound/soc/codecs/rt5682.h24
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c4
-rw-r--r--sound/soc/fsl/imx-audmux.c24
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/intel/Kconfig2
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c8
-rw-r--r--sound/soc/intel/boards/broadwell.c2
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c45
-rw-r--r--sound/soc/intel/boards/haswell.c2
-rw-r--r--sound/soc/intel/skylake/skl.c13
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c17
-rw-r--r--sound/soc/qcom/sdm845.c31
-rw-r--r--sound/soc/samsung/i2s.c18
-rw-r--r--sound/soc/sh/dma-sh7760.c2
-rw-r--r--sound/soc/sh/rcar/core.c8
-rw-r--r--sound/soc/sh/rcar/ssi.c2
-rw-r--r--sound/soc/sh/rcar/ssiu.c2
-rw-r--r--sound/soc/soc-core.c43
-rw-r--r--sound/soc/soc-dapm.c34
-rw-r--r--sound/soc/soc-topology.c13
-rw-r--r--sound/soc/ti/davinci-mcasp.c136
-rw-r--r--sound/soc/xilinx/Kconfig2
-rw-r--r--sound/soc/xilinx/xlnx_i2s.c15
-rw-r--r--sound/sparc/dbri.c4
-rw-r--r--sound/usb/card.c2
-rw-r--r--sound/usb/mixer.c29
-rw-r--r--sound/usb/pcm.c9
-rw-r--r--sound/usb/quirks-table.h6
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--sound/usb/stream.c36
-rw-r--r--tools/arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--tools/arch/powerpc/include/uapi/asm/unistd.h404
-rw-r--r--tools/arch/riscv/include/uapi/asm/bitsperlong.h25
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h8
-rw-r--r--tools/bpf/bpftool/Makefile9
-rw-r--r--tools/bpf/bpftool/btf_dumper.c13
-rw-r--r--tools/bpf/bpftool/common.c6
-rw-r--r--tools/bpf/bpftool/json_writer.c7
-rw-r--r--tools/bpf/bpftool/json_writer.h5
-rw-r--r--tools/bpf/bpftool/map.c33
-rw-r--r--tools/bpf/bpftool/prog.c5
-rw-r--r--tools/iio/iio_generic_buffer.c2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h4
-rw-r--r--tools/include/uapi/asm/bitsperlong.h4
-rw-r--r--tools/include/uapi/drm/i915_drm.h8
-rw-r--r--tools/include/uapi/linux/fs.h60
-rw-r--r--tools/include/uapi/linux/if_link.h19
-rw-r--r--tools/include/uapi/linux/in.h10
-rw-r--r--tools/include/uapi/linux/kvm.h19
-rw-r--r--tools/include/uapi/linux/mount.h58
-rw-r--r--tools/include/uapi/linux/pkt_sched.h1163
-rw-r--r--tools/include/uapi/linux/prctl.h8
-rw-r--r--tools/include/uapi/linux/vhost.h113
-rw-r--r--tools/lib/bpf/.gitignore1
-rw-r--r--tools/lib/bpf/README.rst14
-rw-r--r--tools/lib/bpf/bpf.c19
-rw-r--r--tools/lib/traceevent/event-parse-api.c4
-rw-r--r--tools/lib/traceevent/event-parse-local.h4
-rw-r--r--tools/lib/traceevent/event-parse.c129
-rw-r--r--tools/lib/traceevent/event-parse.h17
-rw-r--r--tools/lib/traceevent/plugin_kvm.c2
-rw-r--r--tools/lib/traceevent/trace-seq.c17
-rw-r--r--tools/perf/Documentation/perf-c2c.txt16
-rw-r--r--tools/perf/Documentation/perf-mem.txt2
-rw-r--r--tools/perf/Makefile.perf8
-rw-r--r--tools/perf/arch/arm/tests/Build1
-rw-r--r--tools/perf/arch/arm/tests/arch-tests.c4
-rw-r--r--tools/perf/arch/arm/tests/vectors-page.c24
-rw-r--r--tools/perf/arch/powerpc/Makefile15
-rwxr-xr-xtools/perf/arch/powerpc/entry/syscalls/mksyscalltbl22
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl427
-rw-r--r--tools/perf/arch/powerpc/include/perf_regs.h3
-rw-r--r--tools/perf/arch/powerpc/util/Build1
-rw-r--r--tools/perf/arch/powerpc/util/mem-events.c11
-rw-r--r--tools/perf/arch/powerpc/util/perf_regs.c1
-rw-r--r--tools/perf/builtin-script.c9
-rw-r--r--tools/perf/builtin-stat.c3
-rw-r--r--tools/perf/builtin-top.c7
-rw-r--r--tools/perf/builtin-trace.c40
-rwxr-xr-xtools/perf/check-headers.sh2
-rw-r--r--tools/perf/perf-read-vdso.c6
-rw-r--r--tools/perf/tests/attr.py32
-rw-r--r--tools/perf/tests/evsel-tp-sched.c2
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh3
-rw-r--r--tools/perf/tests/tests.h5
-rwxr-xr-xtools/perf/trace/beauty/mount_flags.sh4
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh2
-rw-r--r--tools/perf/ui/browsers/annotate.c16
-rw-r--r--tools/perf/util/annotate.c8
-rw-r--r--tools/perf/util/c++/clang.cpp2
-rw-r--r--tools/perf/util/callchain.c32
-rw-r--r--tools/perf/util/callchain.h1
-rw-r--r--tools/perf/util/cpumap.c11
-rw-r--r--tools/perf/util/find-map.c (renamed from tools/perf/util/find-vdso-map.c)7
-rw-r--r--tools/perf/util/machine.c2
-rw-r--r--tools/perf/util/mem-events.c2
-rw-r--r--tools/perf/util/ordered-events.c6
-rw-r--r--tools/perf/util/setup.py2
-rw-r--r--tools/perf/util/strbuf.c1
-rw-r--r--tools/perf/util/symbol-elf.c23
-rw-r--r--tools/perf/util/symbol.c1
-rw-r--r--tools/perf/util/vdso.c6
-rw-r--r--tools/testing/nvdimm/dimm_devs.c4
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/bpf/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/Makefile5
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h30
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c6
-rw-r--r--tools/testing/selftests/bpf/test_btf.c38
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c2
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c2
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c10
-rw-r--r--tools/testing/selftests/bpf/test_netcnt.c2
-rw-r--r--tools/testing/selftests/bpf/test_progs.c30
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c2
-rw-r--r--tools/testing/selftests/bpf/test_sock.c2
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c55
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c2
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_user.c2
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c2
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c120
-rwxr-xr-xtools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh13
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh20
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan.sh18
-rw-r--r--tools/testing/selftests/filesystems/binderfs/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/binderfs/Makefile6
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c275
-rw-r--r--tools/testing/selftests/filesystems/binderfs/config3
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c9
-rw-r--r--tools/testing/selftests/ir/Makefile2
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c6
-rw-r--r--tools/testing/selftests/net/Makefile2
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh1
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_aware.sh47
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh2
-rw-r--r--tools/testing/selftests/net/ip_defrag.c96
-rwxr-xr-xtools/testing/selftests/net/ip_defrag.sh9
-rwxr-xr-xtools/testing/selftests/net/xfrm_policy.sh153
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rw-r--r--tools/testing/selftests/netfilter/config2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh762
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile3
-rw-r--r--tools/testing/selftests/networking/timestamping/txtimestamp.c2
-rw-r--r--tools/testing/selftests/proc/.gitignore1
-rw-r--r--tools/testing/selftests/proc/Makefile1
-rw-r--r--tools/testing/selftests/proc/setns-dcache.c129
-rw-r--r--tools/testing/selftests/rtc/rtctest.c109
-rw-r--r--tools/testing/selftests/seccomp/Makefile2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c82
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json88
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json31
-rw-r--r--tools/testing/selftests/timers/Makefile2
-rw-r--r--tools/testing/selftests/vm/gup_benchmark.c1
-rw-r--r--tools/testing/selftests/x86/mpx-mini-test.c2
-rw-r--r--tools/testing/selftests/x86/protection_keys.c41
-rw-r--r--tools/testing/selftests/x86/unwind_vdso.c1
-rw-r--r--tools/thermal/tmon/Makefile2
-rw-r--r--tools/vm/page_owner_sort.c4
-rw-r--r--virt/kvm/arm/arm.c10
-rw-r--r--virt/kvm/arm/mmu.c9
-rw-r--r--virt/kvm/arm/psci.c36
-rw-r--r--virt/kvm/arm/vgic/vgic-debug.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c30
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c22
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c12
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c34
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c8
-rw-r--r--virt/kvm/arm/vgic/vgic.c118
-rw-r--r--virt/kvm/kvm_main.c12
2112 files changed, 30043 insertions, 12364 deletions
diff --git a/.clang-format b/.clang-format
index e6080f5834a3..bc2ffb2a0b53 100644
--- a/.clang-format
+++ b/.clang-format
@@ -72,6 +72,10 @@ ForEachMacros:
72 - 'apei_estatus_for_each_section' 72 - 'apei_estatus_for_each_section'
73 - 'ata_for_each_dev' 73 - 'ata_for_each_dev'
74 - 'ata_for_each_link' 74 - 'ata_for_each_link'
75 - '__ata_qc_for_each'
76 - 'ata_qc_for_each'
77 - 'ata_qc_for_each_raw'
78 - 'ata_qc_for_each_with_internal'
75 - 'ax25_for_each' 79 - 'ax25_for_each'
76 - 'ax25_uid_for_each' 80 - 'ax25_uid_for_each'
77 - 'bio_for_each_integrity_vec' 81 - 'bio_for_each_integrity_vec'
@@ -85,6 +89,7 @@ ForEachMacros:
85 - 'blk_queue_for_each_rl' 89 - 'blk_queue_for_each_rl'
86 - 'bond_for_each_slave' 90 - 'bond_for_each_slave'
87 - 'bond_for_each_slave_rcu' 91 - 'bond_for_each_slave_rcu'
92 - 'bpf_for_each_spilled_reg'
88 - 'btree_for_each_safe128' 93 - 'btree_for_each_safe128'
89 - 'btree_for_each_safe32' 94 - 'btree_for_each_safe32'
90 - 'btree_for_each_safe64' 95 - 'btree_for_each_safe64'
@@ -103,6 +108,8 @@ ForEachMacros:
103 - 'drm_atomic_crtc_for_each_plane' 108 - 'drm_atomic_crtc_for_each_plane'
104 - 'drm_atomic_crtc_state_for_each_plane' 109 - 'drm_atomic_crtc_state_for_each_plane'
105 - 'drm_atomic_crtc_state_for_each_plane_state' 110 - 'drm_atomic_crtc_state_for_each_plane_state'
111 - 'drm_atomic_for_each_plane_damage'
112 - 'drm_connector_for_each_possible_encoder'
106 - 'drm_for_each_connector_iter' 113 - 'drm_for_each_connector_iter'
107 - 'drm_for_each_crtc' 114 - 'drm_for_each_crtc'
108 - 'drm_for_each_encoder' 115 - 'drm_for_each_encoder'
@@ -121,11 +128,21 @@ ForEachMacros:
121 - 'for_each_bio' 128 - 'for_each_bio'
122 - 'for_each_board_func_rsrc' 129 - 'for_each_board_func_rsrc'
123 - 'for_each_bvec' 130 - 'for_each_bvec'
131 - 'for_each_card_components'
132 - 'for_each_card_links'
133 - 'for_each_card_links_safe'
134 - 'for_each_card_prelinks'
135 - 'for_each_card_rtds'
136 - 'for_each_card_rtds_safe'
137 - 'for_each_cgroup_storage_type'
124 - 'for_each_child_of_node' 138 - 'for_each_child_of_node'
125 - 'for_each_clear_bit' 139 - 'for_each_clear_bit'
126 - 'for_each_clear_bit_from' 140 - 'for_each_clear_bit_from'
127 - 'for_each_cmsghdr' 141 - 'for_each_cmsghdr'
128 - 'for_each_compatible_node' 142 - 'for_each_compatible_node'
143 - 'for_each_component_dais'
144 - 'for_each_component_dais_safe'
145 - 'for_each_comp_order'
129 - 'for_each_console' 146 - 'for_each_console'
130 - 'for_each_cpu' 147 - 'for_each_cpu'
131 - 'for_each_cpu_and' 148 - 'for_each_cpu_and'
@@ -133,6 +150,10 @@ ForEachMacros:
133 - 'for_each_cpu_wrap' 150 - 'for_each_cpu_wrap'
134 - 'for_each_dev_addr' 151 - 'for_each_dev_addr'
135 - 'for_each_dma_cap_mask' 152 - 'for_each_dma_cap_mask'
153 - 'for_each_dpcm_be'
154 - 'for_each_dpcm_be_rollback'
155 - 'for_each_dpcm_be_safe'
156 - 'for_each_dpcm_fe'
136 - 'for_each_drhd_unit' 157 - 'for_each_drhd_unit'
137 - 'for_each_dss_dev' 158 - 'for_each_dss_dev'
138 - 'for_each_efi_memory_desc' 159 - 'for_each_efi_memory_desc'
@@ -149,6 +170,7 @@ ForEachMacros:
149 - 'for_each_iommu' 170 - 'for_each_iommu'
150 - 'for_each_ip_tunnel_rcu' 171 - 'for_each_ip_tunnel_rcu'
151 - 'for_each_irq_nr' 172 - 'for_each_irq_nr'
173 - 'for_each_link_codecs'
152 - 'for_each_lru' 174 - 'for_each_lru'
153 - 'for_each_matching_node' 175 - 'for_each_matching_node'
154 - 'for_each_matching_node_and_match' 176 - 'for_each_matching_node_and_match'
@@ -160,6 +182,7 @@ ForEachMacros:
160 - 'for_each_mem_range_rev' 182 - 'for_each_mem_range_rev'
161 - 'for_each_migratetype_order' 183 - 'for_each_migratetype_order'
162 - 'for_each_msi_entry' 184 - 'for_each_msi_entry'
185 - 'for_each_msi_entry_safe'
163 - 'for_each_net' 186 - 'for_each_net'
164 - 'for_each_netdev' 187 - 'for_each_netdev'
165 - 'for_each_netdev_continue' 188 - 'for_each_netdev_continue'
@@ -183,12 +206,14 @@ ForEachMacros:
183 - 'for_each_node_with_property' 206 - 'for_each_node_with_property'
184 - 'for_each_of_allnodes' 207 - 'for_each_of_allnodes'
185 - 'for_each_of_allnodes_from' 208 - 'for_each_of_allnodes_from'
209 - 'for_each_of_cpu_node'
186 - 'for_each_of_pci_range' 210 - 'for_each_of_pci_range'
187 - 'for_each_old_connector_in_state' 211 - 'for_each_old_connector_in_state'
188 - 'for_each_old_crtc_in_state' 212 - 'for_each_old_crtc_in_state'
189 - 'for_each_oldnew_connector_in_state' 213 - 'for_each_oldnew_connector_in_state'
190 - 'for_each_oldnew_crtc_in_state' 214 - 'for_each_oldnew_crtc_in_state'
191 - 'for_each_oldnew_plane_in_state' 215 - 'for_each_oldnew_plane_in_state'
216 - 'for_each_oldnew_plane_in_state_reverse'
192 - 'for_each_oldnew_private_obj_in_state' 217 - 'for_each_oldnew_private_obj_in_state'
193 - 'for_each_old_plane_in_state' 218 - 'for_each_old_plane_in_state'
194 - 'for_each_old_private_obj_in_state' 219 - 'for_each_old_private_obj_in_state'
@@ -206,14 +231,17 @@ ForEachMacros:
206 - 'for_each_process' 231 - 'for_each_process'
207 - 'for_each_process_thread' 232 - 'for_each_process_thread'
208 - 'for_each_property_of_node' 233 - 'for_each_property_of_node'
234 - 'for_each_registered_fb'
209 - 'for_each_reserved_mem_region' 235 - 'for_each_reserved_mem_region'
210 - 'for_each_resv_unavail_range' 236 - 'for_each_rtd_codec_dai'
237 - 'for_each_rtd_codec_dai_rollback'
211 - 'for_each_rtdcom' 238 - 'for_each_rtdcom'
212 - 'for_each_rtdcom_safe' 239 - 'for_each_rtdcom_safe'
213 - 'for_each_set_bit' 240 - 'for_each_set_bit'
214 - 'for_each_set_bit_from' 241 - 'for_each_set_bit_from'
215 - 'for_each_sg' 242 - 'for_each_sg'
216 - 'for_each_sg_page' 243 - 'for_each_sg_page'
244 - 'for_each_sibling_event'
217 - '__for_each_thread' 245 - '__for_each_thread'
218 - 'for_each_thread' 246 - 'for_each_thread'
219 - 'for_each_zone' 247 - 'for_each_zone'
@@ -251,6 +279,8 @@ ForEachMacros:
251 - 'hlist_nulls_for_each_entry_from' 279 - 'hlist_nulls_for_each_entry_from'
252 - 'hlist_nulls_for_each_entry_rcu' 280 - 'hlist_nulls_for_each_entry_rcu'
253 - 'hlist_nulls_for_each_entry_safe' 281 - 'hlist_nulls_for_each_entry_safe'
282 - 'i3c_bus_for_each_i2cdev'
283 - 'i3c_bus_for_each_i3cdev'
254 - 'ide_host_for_each_port' 284 - 'ide_host_for_each_port'
255 - 'ide_port_for_each_dev' 285 - 'ide_port_for_each_dev'
256 - 'ide_port_for_each_present_dev' 286 - 'ide_port_for_each_present_dev'
@@ -267,11 +297,14 @@ ForEachMacros:
267 - 'kvm_for_each_memslot' 297 - 'kvm_for_each_memslot'
268 - 'kvm_for_each_vcpu' 298 - 'kvm_for_each_vcpu'
269 - 'list_for_each' 299 - 'list_for_each'
300 - 'list_for_each_codec'
301 - 'list_for_each_codec_safe'
270 - 'list_for_each_entry' 302 - 'list_for_each_entry'
271 - 'list_for_each_entry_continue' 303 - 'list_for_each_entry_continue'
272 - 'list_for_each_entry_continue_rcu' 304 - 'list_for_each_entry_continue_rcu'
273 - 'list_for_each_entry_continue_reverse' 305 - 'list_for_each_entry_continue_reverse'
274 - 'list_for_each_entry_from' 306 - 'list_for_each_entry_from'
307 - 'list_for_each_entry_from_rcu'
275 - 'list_for_each_entry_from_reverse' 308 - 'list_for_each_entry_from_reverse'
276 - 'list_for_each_entry_lockless' 309 - 'list_for_each_entry_lockless'
277 - 'list_for_each_entry_rcu' 310 - 'list_for_each_entry_rcu'
@@ -291,6 +324,7 @@ ForEachMacros:
291 - 'media_device_for_each_intf' 324 - 'media_device_for_each_intf'
292 - 'media_device_for_each_link' 325 - 'media_device_for_each_link'
293 - 'media_device_for_each_pad' 326 - 'media_device_for_each_pad'
327 - 'nanddev_io_for_each_page'
294 - 'netdev_for_each_lower_dev' 328 - 'netdev_for_each_lower_dev'
295 - 'netdev_for_each_lower_private' 329 - 'netdev_for_each_lower_private'
296 - 'netdev_for_each_lower_private_rcu' 330 - 'netdev_for_each_lower_private_rcu'
@@ -357,12 +391,14 @@ ForEachMacros:
357 - 'sk_nulls_for_each' 391 - 'sk_nulls_for_each'
358 - 'sk_nulls_for_each_from' 392 - 'sk_nulls_for_each_from'
359 - 'sk_nulls_for_each_rcu' 393 - 'sk_nulls_for_each_rcu'
394 - 'snd_array_for_each'
360 - 'snd_pcm_group_for_each_entry' 395 - 'snd_pcm_group_for_each_entry'
361 - 'snd_soc_dapm_widget_for_each_path' 396 - 'snd_soc_dapm_widget_for_each_path'
362 - 'snd_soc_dapm_widget_for_each_path_safe' 397 - 'snd_soc_dapm_widget_for_each_path_safe'
363 - 'snd_soc_dapm_widget_for_each_sink_path' 398 - 'snd_soc_dapm_widget_for_each_sink_path'
364 - 'snd_soc_dapm_widget_for_each_source_path' 399 - 'snd_soc_dapm_widget_for_each_source_path'
365 - 'tb_property_for_each' 400 - 'tb_property_for_each'
401 - 'tcf_exts_for_each_action'
366 - 'udp_portaddr_for_each_entry' 402 - 'udp_portaddr_for_each_entry'
367 - 'udp_portaddr_for_each_entry_rcu' 403 - 'udp_portaddr_for_each_entry_rcu'
368 - 'usb_hub_for_each_child' 404 - 'usb_hub_for_each_child'
@@ -371,6 +407,11 @@ ForEachMacros:
371 - 'v4l2_m2m_for_each_dst_buf_safe' 407 - 'v4l2_m2m_for_each_dst_buf_safe'
372 - 'v4l2_m2m_for_each_src_buf' 408 - 'v4l2_m2m_for_each_src_buf'
373 - 'v4l2_m2m_for_each_src_buf_safe' 409 - 'v4l2_m2m_for_each_src_buf_safe'
410 - 'virtio_device_for_each_vq'
411 - 'xa_for_each'
412 - 'xas_for_each'
413 - 'xas_for_each_conflict'
414 - 'xas_for_each_marked'
374 - 'zorro_for_each_dev' 415 - 'zorro_for_each_dev'
375 416
376#IncludeBlocks: Preserve # Unknown to clang-format-5.0 417#IncludeBlocks: Preserve # Unknown to clang-format-5.0
diff --git a/CREDITS b/CREDITS
index e818eb6a3e71..0175098d4776 100644
--- a/CREDITS
+++ b/CREDITS
@@ -842,10 +842,9 @@ D: ax25-utils maintainer.
842 842
843N: Helge Deller 843N: Helge Deller
844E: deller@gmx.de 844E: deller@gmx.de
845E: hdeller@redhat.de 845W: http://www.parisc-linux.org/
846D: PA-RISC Linux hacker, LASI-, ASP-, WAX-, LCD/LED-driver 846D: PA-RISC Linux architecture maintainer
847S: Schimmelsrain 1 847D: LASI-, ASP-, WAX-, LCD/LED-driver
848S: D-69231 Rauenberg
849S: Germany 848S: Germany
850 849
851N: Jean Delvare 850N: Jean Delvare
@@ -1361,7 +1360,7 @@ S: Stellenbosch, Western Cape
1361S: South Africa 1360S: South Africa
1362 1361
1363N: Grant Grundler 1362N: Grant Grundler
1364E: grundler@parisc-linux.org 1363E: grantgrundler@gmail.com
1365W: http://obmouse.sourceforge.net/ 1364W: http://obmouse.sourceforge.net/
1366W: http://www.parisc-linux.org/ 1365W: http://www.parisc-linux.org/
1367D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver 1366D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver
@@ -2492,7 +2491,7 @@ S: Syracuse, New York 13206
2492S: USA 2491S: USA
2493 2492
2494N: Kyle McMartin 2493N: Kyle McMartin
2495E: kyle@parisc-linux.org 2494E: kyle@mcmartin.ca
2496D: Linux/PARISC hacker 2495D: Linux/PARISC hacker
2497D: AD1889 sound driver 2496D: AD1889 sound driver
2498S: Ottawa, Canada 2497S: Ottawa, Canada
@@ -3780,14 +3779,13 @@ S: 21513 Conradia Ct
3780S: Cupertino, CA 95014 3779S: Cupertino, CA 95014
3781S: USA 3780S: USA
3782 3781
3783N: Thibaut Varene 3782N: Thibaut Varène
3784E: T-Bone@parisc-linux.org 3783E: hacks+kernel@slashdirt.org
3785W: http://www.parisc-linux.org/~varenet/ 3784W: http://hacks.slashdirt.org/
3786P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063
3787D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits 3785D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits
3788D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there 3786D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there
3789D: AD1889 sound driver 3787D: AD1889 sound driver
3790S: Paris, France 3788S: France
3791 3789
3792N: Heikki Vatiainen 3790N: Heikki Vatiainen
3793E: hessu@cs.tut.fi 3791E: hessu@cs.tut.fi
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index 9b642669cb16..169fe08a649b 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -24,7 +24,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
24 cpld3_version 24 cpld3_version
25 25
26Date: November 2018 26Date: November 2018
27KernelVersion: 4.21 27KernelVersion: 5.0
 28Contact:	Vadim Pasternak <vadimp@mellanox.com>	 28Contact:	Vadim Pasternak <vadimp@mellanox.com>
29Description: These files show with which CPLD versions have been burned 29Description: These files show with which CPLD versions have been burned
30 on LED board. 30 on LED board.
@@ -35,7 +35,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
35 jtag_enable 35 jtag_enable
36 36
37Date: November 2018 37Date: November 2018
38KernelVersion: 4.21 38KernelVersion: 5.0
 39Contact:	Vadim Pasternak <vadimp@mellanox.com>	 39Contact:	Vadim Pasternak <vadimp@mellanox.com>
40Description: These files enable and disable the access to the JTAG domain. 40Description: These files enable and disable the access to the JTAG domain.
41 By default access to the JTAG domain is disabled. 41 By default access to the JTAG domain is disabled.
@@ -105,7 +105,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
105 reset_voltmon_upgrade_fail 105 reset_voltmon_upgrade_fail
106 106
107Date: November 2018 107Date: November 2018
108KernelVersion: 4.21 108KernelVersion: 5.0
 109Contact:	Vadim Pasternak <vadimp@mellanox.com>	 109Contact:	Vadim Pasternak <vadimp@mellanox.com>
110Description: These files show the system reset cause, as following: ComEx 110Description: These files show the system reset cause, as following: ComEx
111 power fail, reset from ComEx, system platform reset, reset 111 power fail, reset from ComEx, system platform reset, reset
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 7710d4022b19..dfad7427817c 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -279,3 +279,12 @@ Description:
279 size in 512B sectors of the zones of the device, with 279 size in 512B sectors of the zones of the device, with
280 the eventual exception of the last zone of the device 280 the eventual exception of the last zone of the device
281 which may be smaller. 281 which may be smaller.
282
283What: /sys/block/<disk>/queue/io_timeout
284Date: November 2018
285Contact: Weiping Zhang <zhangweiping@didiglobal.com>
286Description:
287 io_timeout is the request timeout in milliseconds. If a request
288 does not complete in this time then the block driver timeout
289 handler is invoked. That timeout handler can decide to retry
290 the request, to fail it or to start a device recovery strategy.
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index 9d2339a485c8..14b2bf2e5105 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -122,11 +122,18 @@ Description:
122 statistics (bd_count, bd_reads, bd_writes) in a format 122 statistics (bd_count, bd_reads, bd_writes) in a format
123 similar to block layer statistics file format. 123 similar to block layer statistics file format.
124 124
125What: /sys/block/zram<id>/writeback_limit_enable
126Date: November 2018
127Contact: Minchan Kim <minchan@kernel.org>
128Description:
129 The writeback_limit_enable file is read-write and specifies
 130	 	enabling of the writeback_limit feature. "1" means enable the feature.
131 No limit "0" is the initial state.
132
125What: /sys/block/zram<id>/writeback_limit 133What: /sys/block/zram<id>/writeback_limit
126Date: November 2018 134Date: November 2018
127Contact: Minchan Kim <minchan@kernel.org> 135Contact: Minchan Kim <minchan@kernel.org>
128Description: 136Description:
129 The writeback_limit file is read-write and specifies the maximum 137 The writeback_limit file is read-write and specifies the maximum
130 amount of writeback ZRAM can do. The limit could be changed 138 amount of writeback ZRAM can do. The limit could be changed
131 in run time and "0" means disable the limit. 139 in run time.
132 No limit is the initial state.
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 0797eec76be1..47e577264198 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -1,9 +1,9 @@
1.. _readme: 1.. _readme:
2 2
3Linux kernel release 4.x <http://kernel.org/> 3Linux kernel release 5.x <http://kernel.org/>
4============================================= 4=============================================
5 5
6These are the release notes for Linux version 4. Read them carefully, 6These are the release notes for Linux version 5. Read them carefully,
7as they tell you what this is all about, explain how to install the 7as they tell you what this is all about, explain how to install the
8kernel, and what to do if something goes wrong. 8kernel, and what to do if something goes wrong.
9 9
@@ -63,7 +63,7 @@ Installing the kernel source
63 directory where you have permissions (e.g. your home directory) and 63 directory where you have permissions (e.g. your home directory) and
64 unpack it:: 64 unpack it::
65 65
66 xz -cd linux-4.X.tar.xz | tar xvf - 66 xz -cd linux-5.x.tar.xz | tar xvf -
67 67
68 Replace "X" with the version number of the latest kernel. 68 Replace "X" with the version number of the latest kernel.
69 69
@@ -72,26 +72,26 @@ Installing the kernel source
72 files. They should match the library, and not get messed up by 72 files. They should match the library, and not get messed up by
73 whatever the kernel-du-jour happens to be. 73 whatever the kernel-du-jour happens to be.
74 74
75 - You can also upgrade between 4.x releases by patching. Patches are 75 - You can also upgrade between 5.x releases by patching. Patches are
76 distributed in the xz format. To install by patching, get all the 76 distributed in the xz format. To install by patching, get all the
77 newer patch files, enter the top level directory of the kernel source 77 newer patch files, enter the top level directory of the kernel source
78 (linux-4.X) and execute:: 78 (linux-5.x) and execute::
79 79
80 xz -cd ../patch-4.x.xz | patch -p1 80 xz -cd ../patch-5.x.xz | patch -p1
81 81
82 Replace "x" for all versions bigger than the version "X" of your current 82 Replace "x" for all versions bigger than the version "x" of your current
83 source tree, **in_order**, and you should be ok. You may want to remove 83 source tree, **in_order**, and you should be ok. You may want to remove
84 the backup files (some-file-name~ or some-file-name.orig), and make sure 84 the backup files (some-file-name~ or some-file-name.orig), and make sure
85 that there are no failed patches (some-file-name# or some-file-name.rej). 85 that there are no failed patches (some-file-name# or some-file-name.rej).
86 If there are, either you or I have made a mistake. 86 If there are, either you or I have made a mistake.
87 87
88 Unlike patches for the 4.x kernels, patches for the 4.x.y kernels 88 Unlike patches for the 5.x kernels, patches for the 5.x.y kernels
89 (also known as the -stable kernels) are not incremental but instead apply 89 (also known as the -stable kernels) are not incremental but instead apply
90 directly to the base 4.x kernel. For example, if your base kernel is 4.0 90 directly to the base 5.x kernel. For example, if your base kernel is 5.0
91 and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1 91 and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1
92 and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and 92 and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and
93 want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is, 93 want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is,
94 patch -R) **before** applying the 4.0.3 patch. You can read more on this in 94 patch -R) **before** applying the 5.0.3 patch. You can read more on this in
95 :ref:`Documentation/process/applying-patches.rst <applying_patches>`. 95 :ref:`Documentation/process/applying-patches.rst <applying_patches>`.
96 96
97 Alternatively, the script patch-kernel can be used to automate this 97 Alternatively, the script patch-kernel can be used to automate this
@@ -114,7 +114,7 @@ Installing the kernel source
114Software requirements 114Software requirements
115--------------------- 115---------------------
116 116
117 Compiling and running the 4.x kernels requires up-to-date 117 Compiling and running the 5.x kernels requires up-to-date
118 versions of various software packages. Consult 118 versions of various software packages. Consult
119 :ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers 119 :ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers
120 required and how to get updates for these packages. Beware that using 120 required and how to get updates for these packages. Beware that using
@@ -132,12 +132,12 @@ Build directory for the kernel
132 place for the output files (including .config). 132 place for the output files (including .config).
133 Example:: 133 Example::
134 134
135 kernel source code: /usr/src/linux-4.X 135 kernel source code: /usr/src/linux-5.x
136 build directory: /home/name/build/kernel 136 build directory: /home/name/build/kernel
137 137
138 To configure and build the kernel, use:: 138 To configure and build the kernel, use::
139 139
140 cd /usr/src/linux-4.X 140 cd /usr/src/linux-5.x
141 make O=/home/name/build/kernel menuconfig 141 make O=/home/name/build/kernel menuconfig
142 make O=/home/name/build/kernel 142 make O=/home/name/build/kernel
143 sudo make O=/home/name/build/kernel modules_install install 143 sudo make O=/home/name/build/kernel modules_install install
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index b799bcf67d7b..858b6c0b9a15 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1696,12 +1696,11 @@
1696 By default, super page will be supported if Intel IOMMU 1696 By default, super page will be supported if Intel IOMMU
1697 has the capability. With this option, super page will 1697 has the capability. With this option, super page will
1698 not be supported. 1698 not be supported.
1699 sm_off [Default Off] 1699 sm_on [Default Off]
1700 By default, scalable mode will be supported if the 1700 By default, scalable mode will be disabled even if the
1701 hardware advertises that it has support for the scalable 1701 hardware advertises that it has support for the scalable
1702 mode translation. With this option set, scalable mode 1702 mode translation. With this option set, scalable mode
1703 will not be used even on hardware which claims to support 1703 will be used on hardware which claims to support it.
1704 it.
1705 tboot_noforce [Default Off] 1704 tboot_noforce [Default Off]
1706 Do not force the Intel IOMMU enabled under tboot. 1705 Do not force the Intel IOMMU enabled under tboot.
1707 By default, tboot will force Intel IOMMU on, which 1706 By default, tboot will force Intel IOMMU on, which
diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
index 8d8d8f06cab2..98a8dd5ee385 100644
--- a/Documentation/block/bfq-iosched.txt
+++ b/Documentation/block/bfq-iosched.txt
@@ -357,6 +357,13 @@ video playing/streaming, a very low drop rate may be more important
357than maximum throughput. In these cases, consider setting the 357than maximum throughput. In these cases, consider setting the
358strict_guarantees parameter. 358strict_guarantees parameter.
359 359
360slice_idle_us
361-------------
362
363Controls the same tuning parameter as slice_idle, but in microseconds.
364Either tunable can be used to set idling behavior. Afterwards, the
365other tunable will reflect the newly set value in sysfs.
366
360strict_guarantees 367strict_guarantees
361----------------- 368-----------------
362 369
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index ea2dafe49ae8..4cad1024fff7 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -88,7 +88,8 @@ shared_tags=[0/1]: Default: 0
88 88
89zoned=[0/1]: Default: 0 89zoned=[0/1]: Default: 0
90 0: Block device is exposed as a random-access block device. 90 0: Block device is exposed as a random-access block device.
91 1: Block device is exposed as a host-managed zoned block device. 91 1: Block device is exposed as a host-managed zoned block device. Requires
92 CONFIG_BLK_DEV_ZONED.
92 93
93zone_size=[MB]: Default: 256 94zone_size=[MB]: Default: 256
94 Per zone size when exposed as a zoned block device. Must be a power of two. 95 Per zone size when exposed as a zoned block device. Must be a power of two.
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 39e286d7afc9..83b457e24bba 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -67,6 +67,13 @@ If set to a value larger than 0, the kernel will put the process issuing
67IO to sleep for this amount of microseconds before entering classic 67IO to sleep for this amount of microseconds before entering classic
68polling. 68polling.
69 69
70io_timeout (RW)
71---------------
72io_timeout is the request timeout in milliseconds. If a request does not
73complete in this time then the block driver timeout handler is invoked.
74That timeout handler can decide to retry the request, to fail it or to start
75a device recovery strategy.
76
70iostats (RW) 77iostats (RW)
71------------- 78-------------
72This file is used to control (on/off) the iostats accounting of the 79This file is used to control (on/off) the iostats accounting of the
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 436c5e98e1b6..4df0ce271085 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -156,22 +156,23 @@ Per-device statistics are exported as various nodes under /sys/block/zram<id>/
156A brief description of exported device attributes. For more details please 156A brief description of exported device attributes. For more details please
157read Documentation/ABI/testing/sysfs-block-zram. 157read Documentation/ABI/testing/sysfs-block-zram.
158 158
159Name access description 159Name access description
160---- ------ ----------- 160---- ------ -----------
161disksize RW show and set the device's disk size 161disksize RW show and set the device's disk size
162initstate RO shows the initialization state of the device 162initstate RO shows the initialization state of the device
163reset WO trigger device reset 163reset WO trigger device reset
164mem_used_max WO reset the `mem_used_max' counter (see later) 164mem_used_max WO reset the `mem_used_max' counter (see later)
165mem_limit WO specifies the maximum amount of memory ZRAM can use 165mem_limit WO specifies the maximum amount of memory ZRAM can use
166 to store the compressed data 166 to store the compressed data
167writeback_limit WO specifies the maximum amount of write IO zram can 167writeback_limit WO specifies the maximum amount of write IO zram can
168 write out to backing device as 4KB unit 168 write out to backing device as 4KB unit
169max_comp_streams RW the number of possible concurrent compress operations 169writeback_limit_enable RW show and set writeback_limit feature
170comp_algorithm RW show and change the compression algorithm 170max_comp_streams RW the number of possible concurrent compress operations
171compact WO trigger memory compaction 171comp_algorithm RW show and change the compression algorithm
172debug_stat RO this file is used for zram debugging purposes 172compact WO trigger memory compaction
173backing_dev RW set up backend storage for zram to write out 173debug_stat RO this file is used for zram debugging purposes
174idle WO mark allocated slot as idle 174backing_dev RW set up backend storage for zram to write out
175idle WO mark allocated slot as idle
175 176
176 177
177User space is advised to use the following files to read the device statistics. 178User space is advised to use the following files to read the device statistics.
@@ -280,32 +281,51 @@ With the command, zram writeback idle pages from memory to the storage.
280If there are lots of write IO with flash device, potentially, it has 281If there are lots of write IO with flash device, potentially, it has
281flash wearout problem so that admin needs to design write limitation 282flash wearout problem so that admin needs to design write limitation
282to guarantee storage health for entire product life. 283to guarantee storage health for entire product life.
283To overcome the concern, zram supports "writeback_limit". 284
284The "writeback_limit"'s default value is 0 so that it doesn't limit 285To overcome the concern, zram supports "writeback_limit" feature.
285any writeback. If admin want to measure writeback count in a certain 286The "writeback_limit_enable"'s default value is 0 so that it doesn't limit
286period, he could know it via /sys/block/zram0/bd_stat's 3rd column. 287any writeback. IOW, if admin want to apply writeback budget, he should
288enable writeback_limit_enable via
289
290 $ echo 1 > /sys/block/zramX/writeback_limit_enable
291
292Once writeback_limit_enable is set, zram doesn't allow any writeback
293until admin set the budget via /sys/block/zramX/writeback_limit.
294
295(If admin doesn't enable writeback_limit_enable, writeback_limit's value
 296	assigned via /sys/block/zramX/writeback_limit is meaningless.)
287 297
288If admin want to limit writeback as per-day 400M, he could do it 298If admin want to limit writeback as per-day 400M, he could do it
289like below. 299like below.
290 300
291 MB_SHIFT=20 301 $ MB_SHIFT=20
292 4K_SHIFT=12 302 $ 4K_SHIFT=12
293 echo $((400<<MB_SHIFT>>4K_SHIFT)) > \ 303 $ echo $((400<<MB_SHIFT>>4K_SHIFT)) > \
294 /sys/block/zram0/writeback_limit. 304 /sys/block/zram0/writeback_limit.
305 $ echo 1 > /sys/block/zram0/writeback_limit_enable
295 306
 296If admin want to allow further write again	 307If admin want to allow further write again once the budget is exhausted,
308he could do it like below
297 309
298 echo 0 > /sys/block/zram0/writeback_limit 310 $ echo $((400<<MB_SHIFT>>4K_SHIFT)) > \
311 /sys/block/zram0/writeback_limit
299 312
300If admin want to see remaining writeback budget since he set, 313If admin want to see remaining writeback budget since he set,
301 314
302 cat /sys/block/zram0/writeback_limit 315 $ cat /sys/block/zramX/writeback_limit
316
317If admin want to disable writeback limit, he could do
318
319 $ echo 0 > /sys/block/zramX/writeback_limit_enable
303 320
304The writeback_limit count will reset whenever you reset zram(e.g., 321The writeback_limit count will reset whenever you reset zram(e.g.,
305system reboot, echo 1 > /sys/block/zramX/reset) so keeping how many of 322system reboot, echo 1 > /sys/block/zramX/reset) so keeping how many of
306writeback happened until you reset the zram to allocate extra writeback 323writeback happened until you reset the zram to allocate extra writeback
307budget in next setting is user's job. 324budget in next setting is user's job.
308 325
326If admin want to measure writeback count in a certain period, he could
327know it via /sys/block/zram0/bd_stat's 3rd column.
328
309= memory tracking 329= memory tracking
310 330
311With CONFIG_ZRAM_MEMORY_TRACKING, user can know information of the 331With CONFIG_ZRAM_MEMORY_TRACKING, user can know information of the
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 6780a6d81745..7cc9e368c1e9 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -157,12 +157,11 @@ Q: Does BPF have a stable ABI?
157------------------------------ 157------------------------------
158A: YES. BPF instructions, arguments to BPF programs, set of helper 158A: YES. BPF instructions, arguments to BPF programs, set of helper
159functions and their arguments, recognized return codes are all part 159functions and their arguments, recognized return codes are all part
160of ABI. However when tracing programs are using bpf_probe_read() helper 160of ABI. However there is one specific exception to tracing programs
161to walk kernel internal datastructures and compile with kernel 161which are using helpers like bpf_probe_read() to walk kernel internal
162internal headers these accesses can and will break with newer 162data structures and compile with kernel internal headers. Both of these
163kernels. The union bpf_attr -> kern_version is checked at load time 163kernel internals are subject to change and can break with newer kernels
164to prevent accidentally loading kprobe-based bpf programs written 164such that the program needs to be adapted accordingly.
165for a different kernel. Networking programs don't do kern_version check.
166 165
167Q: How much stack space a BPF program uses? 166Q: How much stack space a BPF program uses?
168------------------------------------------- 167-------------------------------------------
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index 6a6d67acaf69..5d54b27c6eba 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -108,12 +108,13 @@ some, but not all of the other indices changing.
108 108
109Sometimes you need to ensure that a subsequent call to :c:func:`xa_store` 109Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
110will not need to allocate memory. The :c:func:`xa_reserve` function 110will not need to allocate memory. The :c:func:`xa_reserve` function
111will store a reserved entry at the indicated index. Users of the normal 111will store a reserved entry at the indicated index. Users of the
112API will see this entry as containing ``NULL``. If you do not need to 112normal API will see this entry as containing ``NULL``. If you do
113use the reserved entry, you can call :c:func:`xa_release` to remove the 113not need to use the reserved entry, you can call :c:func:`xa_release`
114unused entry. If another user has stored to the entry in the meantime, 114to remove the unused entry. If another user has stored to the entry
115:c:func:`xa_release` will do nothing; if instead you want the entry to 115in the meantime, :c:func:`xa_release` will do nothing; if instead you
116become ``NULL``, you should use :c:func:`xa_erase`. 116want the entry to become ``NULL``, you should use :c:func:`xa_erase`.
117Using :c:func:`xa_insert` on a reserved entry will fail.
117 118
118If all entries in the array are ``NULL``, the :c:func:`xa_empty` function 119If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
119will return ``true``. 120will return ``true``.
@@ -183,6 +184,8 @@ Takes xa_lock internally:
183 * :c:func:`xa_store_bh` 184 * :c:func:`xa_store_bh`
184 * :c:func:`xa_store_irq` 185 * :c:func:`xa_store_irq`
185 * :c:func:`xa_insert` 186 * :c:func:`xa_insert`
187 * :c:func:`xa_insert_bh`
188 * :c:func:`xa_insert_irq`
186 * :c:func:`xa_erase` 189 * :c:func:`xa_erase`
187 * :c:func:`xa_erase_bh` 190 * :c:func:`xa_erase_bh`
188 * :c:func:`xa_erase_irq` 191 * :c:func:`xa_erase_irq`
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 6e5cef0ed6fb..50daa0b3b032 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -17,7 +17,11 @@ extra-y += $(DT_TMP_SCHEMA)
17quiet_cmd_mk_schema = SCHEMA $@ 17quiet_cmd_mk_schema = SCHEMA $@
18 cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^) 18 cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^)
19 19
20DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml') 20DT_DOCS = $(shell \
21 cd $(srctree)/$(src) && \
22 find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
23 )
24
21DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) 25DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
22 26
23extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES)) 27extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
index 84262cdb8d29..96fa46cb133c 100644
--- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -235,4 +235,4 @@ cpus {
235=========================================== 235===========================================
236 236
237[1] ARM Linux Kernel documentation - CPUs bindings 237[1] ARM Linux Kernel documentation - CPUs bindings
238 Documentation/devicetree/bindings/arm/cpus.txt 238 Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 8f0937db55c5..45730ba60af5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -684,7 +684,7 @@ cpus {
684=========================================== 684===========================================
685 685
686[1] ARM Linux Kernel documentation - CPUs bindings 686[1] ARM Linux Kernel documentation - CPUs bindings
687 Documentation/devicetree/bindings/arm/cpus.txt 687 Documentation/devicetree/bindings/arm/cpus.yaml
688 688
689[2] ARM Linux Kernel documentation - PSCI bindings 689[2] ARM Linux Kernel documentation - PSCI bindings
690 Documentation/devicetree/bindings/arm/psci.txt 690 Documentation/devicetree/bindings/arm/psci.txt
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
index 1b2ab1ff5587..46652bf65147 100644
--- a/Documentation/devicetree/bindings/arm/sp810.txt
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
@@ -4,7 +4,7 @@ SP810 System Controller
4Required properties: 4Required properties:
5 5
6- compatible: standard compatible string for a Primecell peripheral, 6- compatible: standard compatible string for a Primecell peripheral,
7 see Documentation/devicetree/bindings/arm/primecell.txt 7 see Documentation/devicetree/bindings/arm/primecell.yaml
8 for more details 8 for more details
9 should be: "arm,sp810", "arm,primecell" 9 should be: "arm,sp810", "arm,primecell"
10 10
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index de9eb0486630..b0d80c0fb265 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
@@ -472,4 +472,4 @@ cpus {
472 472
473=============================================================================== 473===============================================================================
474[1] ARM Linux kernel documentation 474[1] ARM Linux kernel documentation
475 Documentation/devicetree/bindings/arm/cpus.txt 475 Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
index 2ef86ae96df8..d19885b7c73f 100644
--- a/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
+++ b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
@@ -2,13 +2,14 @@
2 2
3The Actions Semi Owl Clock Management Unit generates and supplies clock 3The Actions Semi Owl Clock Management Unit generates and supplies clock
4to various controllers within the SoC. The clock binding described here is 4to various controllers within the SoC. The clock binding described here is
5applicable to S900 and S700 SoC's. 5applicable to S900, S700 and S500 SoC's.
6 6
7Required Properties: 7Required Properties:
8 8
9- compatible: should be one of the following, 9- compatible: should be one of the following,
10 "actions,s900-cmu" 10 "actions,s900-cmu"
11 "actions,s700-cmu" 11 "actions,s700-cmu"
12 "actions,s500-cmu"
12- reg: physical base address of the controller and length of memory mapped 13- reg: physical base address of the controller and length of memory mapped
13 region. 14 region.
14- clocks: Reference to the parent clocks ("hosc", "losc") 15- clocks: Reference to the parent clocks ("hosc", "losc")
@@ -19,8 +20,8 @@ Each clock is assigned an identifier, and client nodes can use this identifier
19to specify the clock which they consume. 20to specify the clock which they consume.
20 21
21All available clocks are defined as preprocessor macros in corresponding 22All available clocks are defined as preprocessor macros in corresponding
22dt-bindings/clock/actions,s900-cmu.h or actions,s700-cmu.h header and can be 23dt-bindings/clock/actions,s900-cmu.h or actions,s700-cmu.h or
23used in device tree sources. 24actions,s500-cmu.h header and can be used in device tree sources.
24 25
25External clocks: 26External clocks:
26 27
diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
index 79511d7bb321..c41f0be5d438 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
@@ -10,6 +10,7 @@ Required Properties:
10 - GXL (S905X, S905D) : "amlogic,meson-gxl-aoclkc" 10 - GXL (S905X, S905D) : "amlogic,meson-gxl-aoclkc"
11 - GXM (S912) : "amlogic,meson-gxm-aoclkc" 11 - GXM (S912) : "amlogic,meson-gxm-aoclkc"
12 - AXG (A113D, A113X) : "amlogic,meson-axg-aoclkc" 12 - AXG (A113D, A113X) : "amlogic,meson-axg-aoclkc"
13 - G12A (S905X2, S905D2, S905Y2) : "amlogic,meson-g12a-aoclkc"
13 followed by the common "amlogic,meson-gx-aoclkc" 14 followed by the common "amlogic,meson-gx-aoclkc"
14- clocks: list of clock phandle, one for each entry clock-names. 15- clocks: list of clock phandle, one for each entry clock-names.
15- clock-names: should contain the following: 16- clock-names: should contain the following:
diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
index a6871953bf04..5c8b105be4d6 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
@@ -9,6 +9,7 @@ Required Properties:
9 "amlogic,gxbb-clkc" for GXBB SoC, 9 "amlogic,gxbb-clkc" for GXBB SoC,
10 "amlogic,gxl-clkc" for GXL and GXM SoC, 10 "amlogic,gxl-clkc" for GXL and GXM SoC,
11 "amlogic,axg-clkc" for AXG SoC. 11 "amlogic,axg-clkc" for AXG SoC.
12 "amlogic,g12a-clkc" for G12A SoC.
12- clocks : list of clock phandle, one for each entry clock-names. 13- clocks : list of clock phandle, one for each entry clock-names.
13- clock-names : should contain the following: 14- clock-names : should contain the following:
14 * "xtal": the platform xtal 15 * "xtal": the platform xtal
diff --git a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
index 50d5897c9849..183c327a7d6b 100644
--- a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
@@ -50,6 +50,8 @@ Required Properties:
50 IPs. 50 IPs.
51 - "samsung,exynos5433-cmu-cam1" - clock controller compatible for CMU_CAM1 51 - "samsung,exynos5433-cmu-cam1" - clock controller compatible for CMU_CAM1
52 which generates clocks for Cortex-A5/MIPI_CSIS2/FIMC-LITE_C/FIMC-FD IPs. 52 which generates clocks for Cortex-A5/MIPI_CSIS2/FIMC-LITE_C/FIMC-FD IPs.
53 - "samsung,exynos5433-cmu-imem" - clock controller compatible for CMU_IMEM
54 which generates clocks for SSS (Security SubSystem) and SlimSSS IPs.
53 55
54- reg: physical base address of the controller and length of memory mapped 56- reg: physical base address of the controller and length of memory mapped
55 region. 57 region.
@@ -168,6 +170,12 @@ Required Properties:
168 - aclk_cam1_400 170 - aclk_cam1_400
169 - aclk_cam1_552 171 - aclk_cam1_552
170 172
173 Input clocks for imem clock controller:
174 - oscclk
175 - aclk_imem_sssx_266
176 - aclk_imem_266
177 - aclk_imem_200
178
171Optional properties: 179Optional properties:
172 - power-domains: a phandle to respective power domain node as described by 180 - power-domains: a phandle to respective power domain node as described by
173 generic PM domain bindings (see power/power_domain.txt for more 181 generic PM domain bindings (see power/power_domain.txt for more
@@ -469,6 +477,21 @@ Example 2: Examples of clock controller nodes are listed below.
469 power-domains = <&pd_cam1>; 477 power-domains = <&pd_cam1>;
470 }; 478 };
471 479
480 cmu_imem: clock-controller@11060000 {
481 compatible = "samsung,exynos5433-cmu-imem";
482 reg = <0x11060000 0x1000>;
483 #clock-cells = <1>;
484
485 clock-names = "oscclk",
486 "aclk_imem_sssx_266",
487 "aclk_imem_266",
488 "aclk_imem_200";
489 clocks = <&xxti>,
490 <&cmu_top CLK_DIV_ACLK_IMEM_SSSX_266>,
491 <&cmu_top CLK_DIV_ACLK_IMEM_266>,
492 <&cmu_top CLK_DIV_ACLK_IMEM_200>;
493 };
494
472Example 3: UART controller node that consumes the clock generated by the clock 495Example 3: UART controller node that consumes the clock generated by the clock
473 controller. 496 controller.
474 497
diff --git a/Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt b/Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt
new file mode 100644
index 000000000000..c359367fd1a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt
@@ -0,0 +1,24 @@
1Binding for simple memory mapped io fixed-rate clock sources.
2The driver reads a clock frequency value from a single 32-bit memory mapped
3I/O register and registers it as a fixed rate clock.
4
5It was designed for test systems, like FPGA, not for complete, finished SoCs.
6
7This binding uses the common clock binding[1].
8
9[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
10
11Required properties:
12- compatible : shall be "fixed-mmio-clock".
13- #clock-cells : from common clock binding; shall be set to 0.
14- reg : Address and length of the clock value register set.
15
16Optional properties:
17- clock-output-names : From common clock binding.
18
19Example:
20sysclock: sysclock@fd020004 {
21 #clock-cells = <0>;
22 compatible = "fixed-mmio-clock";
23 reg = <0xfd020004 0x4>;
24};
diff --git a/Documentation/devicetree/bindings/clock/imx8mm-clock.txt b/Documentation/devicetree/bindings/clock/imx8mm-clock.txt
new file mode 100644
index 000000000000..8e4ab9e619a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx8mm-clock.txt
@@ -0,0 +1,29 @@
1* Clock bindings for NXP i.MX8M Mini
2
3Required properties:
4- compatible: Should be "fsl,imx8mm-ccm"
5- reg: Address and length of the register set
6- #clock-cells: Should be <1>
7- clocks: list of clock specifiers, must contain an entry for each required
8 entry in clock-names
9- clock-names: should include the following entries:
10 - "osc_32k"
11 - "osc_24m"
12 - "clk_ext1"
13 - "clk_ext2"
14 - "clk_ext3"
15 - "clk_ext4"
16
17clk: clock-controller@30380000 {
18 compatible = "fsl,imx8mm-ccm";
19 reg = <0x0 0x30380000 0x0 0x10000>;
20 #clock-cells = <1>;
21 clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>,
22 <&clk_ext3>, <&clk_ext4>;
23 clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
24 "clk_ext3", "clk_ext4";
25};
26
27The clock consumer should specify the desired clock by having the clock
28ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mm-clock.h
29for the full list of i.MX8M Mini clock IDs.
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
index af376a01f2b7..23b52dc02266 100644
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
@@ -18,4 +18,4 @@ Required Properties:
18Each clock is assigned an identifier and client nodes use this identifier 18Each clock is assigned an identifier and client nodes use this identifier
19to specify the clock which they consume. 19to specify the clock which they consume.
20 20
21All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>. 21All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
index 87b4949e9bc8..944719bd586f 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
@@ -16,6 +16,7 @@ Required properties :
16 "qcom,rpmcc-msm8974", "qcom,rpmcc" 16 "qcom,rpmcc-msm8974", "qcom,rpmcc"
17 "qcom,rpmcc-apq8064", "qcom,rpmcc" 17 "qcom,rpmcc-apq8064", "qcom,rpmcc"
18 "qcom,rpmcc-msm8996", "qcom,rpmcc" 18 "qcom,rpmcc-msm8996", "qcom,rpmcc"
19 "qcom,rpmcc-msm8998", "qcom,rpmcc"
19 "qcom,rpmcc-qcs404", "qcom,rpmcc" 20 "qcom,rpmcc-qcs404", "qcom,rpmcc"
20 21
21- #clock-cells : shall contain 1 22- #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.txt b/Documentation/devicetree/bindings/display/arm,pl11x.txt
index ef89ab46b2c9..572fa2773ec4 100644
--- a/Documentation/devicetree/bindings/display/arm,pl11x.txt
+++ b/Documentation/devicetree/bindings/display/arm,pl11x.txt
@@ -1,6 +1,6 @@
1* ARM PrimeCell Color LCD Controller PL110/PL111 1* ARM PrimeCell Color LCD Controller PL110/PL111
2 2
3See also Documentation/devicetree/bindings/arm/primecell.txt 3See also Documentation/devicetree/bindings/arm/primecell.yaml
4 4
5Required properties: 5Required properties:
6 6
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index ac8df3b871f9..f8759145ce1a 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -27,7 +27,6 @@ Example:
27 reg = <0x04300000 0x20000>; 27 reg = <0x04300000 0x20000>;
28 reg-names = "kgsl_3d0_reg_memory"; 28 reg-names = "kgsl_3d0_reg_memory";
29 interrupts = <GIC_SPI 80 0>; 29 interrupts = <GIC_SPI 80 0>;
30 interrupt-names = "kgsl_3d0_irq";
31 clock-names = 30 clock-names =
32 "core", 31 "core",
33 "iface", 32 "iface",
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 38ca2201e8ae..2e097b57f170 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -14,8 +14,6 @@ Required properties:
14 14
15 "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K 15 "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
16 SoCs (either from AP or CP), see 16 SoCs (either from AP or CP), see
17 Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
18 and
19 Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt 17 Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
20 for specific details about the offset property. 18 for specific details about the offset property.
21 19
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index b83bb8249074..a3be5298a5eb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -78,7 +78,7 @@ Sub-nodes:
78PPI affinity can be expressed as a single "ppi-partitions" node, 78PPI affinity can be expressed as a single "ppi-partitions" node,
79containing a set of sub-nodes, each with the following property: 79containing a set of sub-nodes, each with the following property:
80- affinity: Should be a list of phandles to CPU nodes (as described in 80- affinity: Should be a list of phandles to CPU nodes (as described in
81Documentation/devicetree/bindings/arm/cpus.txt). 81 Documentation/devicetree/bindings/arm/cpus.yaml).
82 82
83GICv3 has one or more Interrupt Translation Services (ITS) that are 83GICv3 has one or more Interrupt Translation Services (ITS) that are
84used to route Message Signalled Interrupts (MSI) to the CPUs. 84used to route Message Signalled Interrupts (MSI) to the CPUs.
diff --git a/Documentation/devicetree/bindings/reset/socfpga-reset.txt b/Documentation/devicetree/bindings/reset/socfpga-reset.txt
index 98c9f560e5c5..38fe34fd8b8a 100644
--- a/Documentation/devicetree/bindings/reset/socfpga-reset.txt
+++ b/Documentation/devicetree/bindings/reset/socfpga-reset.txt
@@ -1,7 +1,8 @@
1Altera SOCFPGA Reset Manager 1Altera SOCFPGA Reset Manager
2 2
3Required properties: 3Required properties:
4- compatible : "altr,rst-mgr" 4- compatible : "altr,rst-mgr" for (Cyclone5/Arria5/Arria10)
5 "altr,stratix10-rst-mgr","altr,rst-mgr" for Stratix10 ARM64 SoC
5- reg : Should contain 1 register ranges(address and length) 6- reg : Should contain 1 register ranges(address and length)
6- altr,modrst-offset : Should contain the offset of the first modrst register. 7- altr,modrst-offset : Should contain the offset of the first modrst register.
7- #reset-cells: 1 8- #reset-cells: 1
diff --git a/Documentation/devicetree/bindings/reset/uniphier-reset.txt b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
index 101743dda223..ea005177d20a 100644
--- a/Documentation/devicetree/bindings/reset/uniphier-reset.txt
+++ b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
@@ -120,27 +120,30 @@ Example:
120 }; 120 };
121 121
122 122
123USB3 core reset 123Peripheral core reset in glue layer
124--------------- 124-----------------------------------
125 125
126USB3 core reset belongs to USB3 glue layer. Before using the core reset, 126Some peripheral core reset belongs to its own glue layer. Before using
127it is necessary to control the clocks and resets to enable this layer. 127this core reset, it is necessary to control the clocks and resets to enable
128These clocks and resets should be described in each property. 128this layer. These clocks and resets should be described in each property.
129 129
130Required properties: 130Required properties:
131- compatible: Should be 131- compatible: Should be
132 "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC 132 "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC USB3
133 "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC 133 "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC USB3
134 "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC 134 "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC USB3
135 "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC 135 "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC USB3
136 "socionext,uniphier-pro4-ahci-reset" - for Pro4 SoC AHCI
137 "socionext,uniphier-pxs2-ahci-reset" - for PXs2 SoC AHCI
138 "socionext,uniphier-pxs3-ahci-reset" - for PXs3 SoC AHCI
136- #reset-cells: Should be 1. 139- #reset-cells: Should be 1.
137- reg: Specifies offset and length of the register set for the device. 140- reg: Specifies offset and length of the register set for the device.
138- clocks: A list of phandles to the clock gate for USB3 glue layer. 141- clocks: A list of phandles to the clock gate for the glue layer.
139 According to the clock-names, appropriate clocks are required. 142 According to the clock-names, appropriate clocks are required.
140- clock-names: Should contain 143- clock-names: Should contain
141 "gio", "link" - for Pro4 SoC 144 "gio", "link" - for Pro4 SoC
142 "link" - for others 145 "link" - for others
143- resets: A list of phandles to the reset control for USB3 glue layer. 146- resets: A list of phandles to the reset control for the glue layer.
144 According to the reset-names, appropriate resets are required. 147 According to the reset-names, appropriate resets are required.
145- reset-names: Should contain 148- reset-names: Should contain
146 "gio", "link" - for Pro4 SoC 149 "gio", "link" - for Pro4 SoC
diff --git a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
index 36603419d6f8..0e72183f52bc 100644
--- a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
+++ b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
@@ -4,14 +4,10 @@ Required properties:
4- compatible : "olpc,ap-sp" 4- compatible : "olpc,ap-sp"
5- reg : base address and length of SoC's WTM registers 5- reg : base address and length of SoC's WTM registers
6- interrupts : SP-AP interrupt 6- interrupts : SP-AP interrupt
7- clocks : phandle + clock-specifier for the clock that drives the WTM
8- clock-names: should be "sp"
9 7
10Example: 8Example:
11 ap-sp@d4290000 { 9 ap-sp@d4290000 {
12 compatible = "olpc,ap-sp"; 10 compatible = "olpc,ap-sp";
13 reg = <0xd4290000 0x1000>; 11 reg = <0xd4290000 0x1000>;
14 interrupts = <40>; 12 interrupts = <40>;
15 clocks = <&soc_clocks MMP2_CLK_SP>;
16 clock-names = "sp";
17 } 13 }
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
index 0b8cc533ca83..cf759e5f9b10 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
@@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function
55= EXAMPLE 55= EXAMPLE
56The following example represents the GLINK RPM node on a MSM8996 device, with 56The following example represents the GLINK RPM node on a MSM8996 device, with
57the function for the "rpm_request" channel defined, which is used for 57the function for the "rpm_request" channel defined, which is used for
58regualtors and root clocks. 58regulators and root clocks.
59 59
60 apcs_glb: mailbox@9820000 { 60 apcs_glb: mailbox@9820000 {
61 compatible = "qcom,msm8996-apcs-hmss-global"; 61 compatible = "qcom,msm8996-apcs-hmss-global";
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
index a35af2dafdad..49e1d72d3648 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
@@ -41,12 +41,12 @@ processor ID) and a string identifier.
41- qcom,local-pid: 41- qcom,local-pid:
42 Usage: required 42 Usage: required
43 Value type: <u32> 43 Value type: <u32>
44 Definition: specifies the identfier of the local endpoint of this edge 44 Definition: specifies the identifier of the local endpoint of this edge
45 45
46- qcom,remote-pid: 46- qcom,remote-pid:
47 Usage: required 47 Usage: required
48 Value type: <u32> 48 Value type: <u32>
49 Definition: specifies the identfier of the remote endpoint of this edge 49 Definition: specifies the identifier of the remote endpoint of this edge
50 50
51= SUBNODES 51= SUBNODES
52Each SMP2P pair contain a set of inbound and outbound entries, these are 52Each SMP2P pair contain a set of inbound and outbound entries, these are
diff --git a/Documentation/driver-model/bus.txt b/Documentation/driver-model/bus.txt
index b577a45b93ea..c247b488a567 100644
--- a/Documentation/driver-model/bus.txt
+++ b/Documentation/driver-model/bus.txt
@@ -124,11 +124,11 @@ struct bus_attribute {
124 ssize_t (*store)(struct bus_type *, const char * buf, size_t count); 124 ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
125}; 125};
126 126
127Bus drivers can export attributes using the BUS_ATTR macro that works 127Bus drivers can export attributes using the BUS_ATTR_RW macro that works
128similarly to the DEVICE_ATTR macro for devices. For example, a definition 128similarly to the DEVICE_ATTR_RW macro for devices. For example, a
129like this: 129definition like this:
130 130
131static BUS_ATTR(debug,0644,show_debug,store_debug); 131static BUS_ATTR_RW(debug);
132 132
133is equivalent to declaring: 133is equivalent to declaring:
134 134
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index b277cafce71e..d7d6f01e81ff 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -242,9 +242,11 @@ certainly invest a bit more effort into libata core layer).
242 242
243CLOCK 243CLOCK
244 devm_clk_get() 244 devm_clk_get()
245 devm_clk_get_optional()
245 devm_clk_put() 246 devm_clk_put()
246 devm_clk_hw_register() 247 devm_clk_hw_register()
247 devm_of_clk_add_hw_provider() 248 devm_of_clk_add_hw_provider()
249 devm_clk_hw_register_clkdev()
248 250
249DMA 251DMA
250 dmaenginem_async_device_register() 252 dmaenginem_async_device_register()
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 62af30511a95..60a5ec04e8f0 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -163,6 +163,14 @@ C. Boot options
163 be preserved until there actually is some text is output to the console. 163 be preserved until there actually is some text is output to the console.
164 This option causes fbcon to bind immediately to the fbdev device. 164 This option causes fbcon to bind immediately to the fbdev device.
165 165
1667. fbcon=logo-pos:<location>
167
168 The only possible 'location' is 'center' (without quotes), and when
169 given, the bootup logo is moved from the default top-left corner
170 location to the center of the framebuffer. If more than one logo is
171 displayed due to multiple CPUs, the collected line of logos is moved
172 as a whole.
173
166C. Attaching, Detaching and Unloading 174C. Attaching, Detaching and Unloading
167 175
168Before going on to how to attach, detach and unload the framebuffer console, an 176Before going on to how to attach, detach and unload the framebuffer console, an
diff --git a/Documentation/features/core/cBPF-JIT/arch-support.txt b/Documentation/features/core/cBPF-JIT/arch-support.txt
index 90459cdde314..8620c38d4db0 100644
--- a/Documentation/features/core/cBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/cBPF-JIT/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/core/eBPF-JIT/arch-support.txt b/Documentation/features/core/eBPF-JIT/arch-support.txt
index c90a0382fe66..9ae6e8d0d10d 100644
--- a/Documentation/features/core/eBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/eBPF-JIT/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/core/generic-idle-thread/arch-support.txt b/Documentation/features/core/generic-idle-thread/arch-support.txt
index 0ef6acdb991c..365df2c2ff0b 100644
--- a/Documentation/features/core/generic-idle-thread/arch-support.txt
+++ b/Documentation/features/core/generic-idle-thread/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | ok |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/core/jump-labels/arch-support.txt b/Documentation/features/core/jump-labels/arch-support.txt
index 60111395f932..7fc2e243dee9 100644
--- a/Documentation/features/core/jump-labels/arch-support.txt
+++ b/Documentation/features/core/jump-labels/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/core/tracehook/arch-support.txt b/Documentation/features/core/tracehook/arch-support.txt
index f44c274e40ed..d344b99aae1e 100644
--- a/Documentation/features/core/tracehook/arch-support.txt
+++ b/Documentation/features/core/tracehook/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | ok | 13 | c6x: | ok |
14 | csky: | ok |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index 282ecc8ea1da..304dcd461795 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt
index 01b2b3004e0a..059d58a549c7 100644
--- a/Documentation/features/debug/gcov-profile-all/arch-support.txt
+++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt
index 3b4dff22329f..3e6b8f07d5d0 100644
--- a/Documentation/features/debug/kgdb/arch-support.txt
+++ b/Documentation/features/debug/kgdb/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | ok | 15 | h8300: | ok |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 7e963d0ae646..68f266944d5f 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/kprobes/arch-support.txt b/Documentation/features/debug/kprobes/arch-support.txt
index 4ada027faf16..f4e45bd58fea 100644
--- a/Documentation/features/debug/kprobes/arch-support.txt
+++ b/Documentation/features/debug/kprobes/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/debug/kretprobes/arch-support.txt b/Documentation/features/debug/kretprobes/arch-support.txt
index 044e13fcca5d..1d5651ef11f8 100644
--- a/Documentation/features/debug/kretprobes/arch-support.txt
+++ b/Documentation/features/debug/kretprobes/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/debug/optprobes/arch-support.txt b/Documentation/features/debug/optprobes/arch-support.txt
index dce7669c918f..fb297a88f62c 100644
--- a/Documentation/features/debug/optprobes/arch-support.txt
+++ b/Documentation/features/debug/optprobes/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt
index 954ac1c95553..9999ea521f3e 100644
--- a/Documentation/features/debug/stackprotector/arch-support.txt
+++ b/Documentation/features/debug/stackprotector/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt
index 1a3f9d3229bf..1c577d0cfc7f 100644
--- a/Documentation/features/debug/uprobes/arch-support.txt
+++ b/Documentation/features/debug/uprobes/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/user-ret-profiler/arch-support.txt b/Documentation/features/debug/user-ret-profiler/arch-support.txt
index 1d78d1069a5f..6bfa36b0e017 100644
--- a/Documentation/features/debug/user-ret-profiler/arch-support.txt
+++ b/Documentation/features/debug/user-ret-profiler/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/io/dma-contiguous/arch-support.txt b/Documentation/features/io/dma-contiguous/arch-support.txt
index 30c072d2b67c..eb28b5c97ca6 100644
--- a/Documentation/features/io/dma-contiguous/arch-support.txt
+++ b/Documentation/features/io/dma-contiguous/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | ok |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/cmpxchg-local/arch-support.txt b/Documentation/features/locking/cmpxchg-local/arch-support.txt
index 51704a2dc8d1..242ff5a6586e 100644
--- a/Documentation/features/locking/cmpxchg-local/arch-support.txt
+++ b/Documentation/features/locking/cmpxchg-local/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/lockdep/arch-support.txt b/Documentation/features/locking/lockdep/arch-support.txt
index bd39c5edd460..941fd5b1094d 100644
--- a/Documentation/features/locking/lockdep/arch-support.txt
+++ b/Documentation/features/locking/lockdep/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/queued-rwlocks/arch-support.txt b/Documentation/features/locking/queued-rwlocks/arch-support.txt
index da7aff3bee0b..c683da198f31 100644
--- a/Documentation/features/locking/queued-rwlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-rwlocks/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | ok |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
index 478e9101322c..e3080b82aefd 100644
--- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/rwsem-optimized/arch-support.txt b/Documentation/features/locking/rwsem-optimized/arch-support.txt
index e54b1f1a8091..7521d7500fbe 100644
--- a/Documentation/features/locking/rwsem-optimized/arch-support.txt
+++ b/Documentation/features/locking/rwsem-optimized/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/perf/kprobes-event/arch-support.txt b/Documentation/features/perf/kprobes-event/arch-support.txt
index 7331402d1887..d8278bf62b85 100644
--- a/Documentation/features/perf/kprobes-event/arch-support.txt
+++ b/Documentation/features/perf/kprobes-event/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/perf/perf-regs/arch-support.txt b/Documentation/features/perf/perf-regs/arch-support.txt
index 53feeee6cdad..687d049d9cee 100644
--- a/Documentation/features/perf/perf-regs/arch-support.txt
+++ b/Documentation/features/perf/perf-regs/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/perf/perf-stackdump/arch-support.txt b/Documentation/features/perf/perf-stackdump/arch-support.txt
index 16164348e0ea..90996e3d18a8 100644
--- a/Documentation/features/perf/perf-stackdump/arch-support.txt
+++ b/Documentation/features/perf/perf-stackdump/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index c7858dd1ea8f..8a521a622966 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -34,6 +34,7 @@
34 | arm: | ok | 34 | arm: | ok |
35 | arm64: | ok | 35 | arm64: | ok |
36 | c6x: | TODO | 36 | c6x: | TODO |
37 | csky: | TODO |
37 | h8300: | TODO | 38 | h8300: | TODO |
38 | hexagon: | TODO | 39 | hexagon: | TODO |
39 | ia64: | TODO | 40 | ia64: | TODO |
diff --git a/Documentation/features/sched/numa-balancing/arch-support.txt b/Documentation/features/sched/numa-balancing/arch-support.txt
index c68bb2c2cb62..350823692f28 100644
--- a/Documentation/features/sched/numa-balancing/arch-support.txt
+++ b/Documentation/features/sched/numa-balancing/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | .. | 11 | arm: | .. |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | .. | 13 | c6x: | .. |
14 | csky: | .. |
14 | h8300: | .. | 15 | h8300: | .. |
15 | hexagon: | .. | 16 | hexagon: | .. |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
index d4271b493b41..4fe6c3c3be5c 100644
--- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt
+++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
index 83d9e68462bb..593536f7925b 100644
--- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt
+++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/time/clockevents/arch-support.txt b/Documentation/features/time/clockevents/arch-support.txt
index 3d4908fce6da..7a27157da408 100644
--- a/Documentation/features/time/clockevents/arch-support.txt
+++ b/Documentation/features/time/clockevents/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | ok | 13 | c6x: | ok |
14 | csky: | ok |
14 | h8300: | ok | 15 | h8300: | ok |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/time/context-tracking/arch-support.txt b/Documentation/features/time/context-tracking/arch-support.txt
index c29974afffaa..048bfb6d3872 100644
--- a/Documentation/features/time/context-tracking/arch-support.txt
+++ b/Documentation/features/time/context-tracking/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt
index 8d73c463ec27..a14bbad8e948 100644
--- a/Documentation/features/time/irq-time-acct/arch-support.txt
+++ b/Documentation/features/time/irq-time-acct/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | .. | 17 | ia64: | .. |
diff --git a/Documentation/features/time/modern-timekeeping/arch-support.txt b/Documentation/features/time/modern-timekeeping/arch-support.txt
index e7c6ea6b8fb3..2855dfe2464d 100644
--- a/Documentation/features/time/modern-timekeeping/arch-support.txt
+++ b/Documentation/features/time/modern-timekeeping/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | ok | 13 | c6x: | ok |
14 | csky: | ok |
14 | h8300: | ok | 15 | h8300: | ok |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/time/virt-cpuacct/arch-support.txt b/Documentation/features/time/virt-cpuacct/arch-support.txt
index 4646457461cf..fb0d0cab9cab 100644
--- a/Documentation/features/time/virt-cpuacct/arch-support.txt
+++ b/Documentation/features/time/virt-cpuacct/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/vm/ELF-ASLR/arch-support.txt b/Documentation/features/vm/ELF-ASLR/arch-support.txt
index 1f71d090ff2c..adc25878d217 100644
--- a/Documentation/features/vm/ELF-ASLR/arch-support.txt
+++ b/Documentation/features/vm/ELF-ASLR/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt
index fbd5aa463b0a..f05588f9e4b4 100644
--- a/Documentation/features/vm/PG_uncached/arch-support.txt
+++ b/Documentation/features/vm/PG_uncached/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/vm/THP/arch-support.txt b/Documentation/features/vm/THP/arch-support.txt
index 5d7ecc378f29..cdfe8925f881 100644
--- a/Documentation/features/vm/THP/arch-support.txt
+++ b/Documentation/features/vm/THP/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | .. | 13 | c6x: | .. |
14 | csky: | .. |
14 | h8300: | .. | 15 | h8300: | .. |
15 | hexagon: | .. | 16 | hexagon: | .. |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt
index f7af9678eb66..2bdd3b6cee3c 100644
--- a/Documentation/features/vm/TLB/arch-support.txt
+++ b/Documentation/features/vm/TLB/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | .. | 13 | c6x: | .. |
14 | csky: | TODO |
14 | h8300: | .. | 15 | h8300: | .. |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt
index d0713ccc7117..019131c5acce 100644
--- a/Documentation/features/vm/huge-vmap/arch-support.txt
+++ b/Documentation/features/vm/huge-vmap/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/ioremap_prot/arch-support.txt b/Documentation/features/vm/ioremap_prot/arch-support.txt
index 326e4797bc65..3a6b87de6a19 100644
--- a/Documentation/features/vm/ioremap_prot/arch-support.txt
+++ b/Documentation/features/vm/ioremap_prot/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/numa-memblock/arch-support.txt b/Documentation/features/vm/numa-memblock/arch-support.txt
index 1a988052cd24..3004beb0fd71 100644
--- a/Documentation/features/vm/numa-memblock/arch-support.txt
+++ b/Documentation/features/vm/numa-memblock/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | .. | 11 | arm: | .. |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | .. | 13 | c6x: | .. |
14 | csky: | .. |
14 | h8300: | .. | 15 | h8300: | .. |
15 | hexagon: | .. | 16 | hexagon: | .. |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/vm/pte_special/arch-support.txt b/Documentation/features/vm/pte_special/arch-support.txt
index a8378424bc98..2dc5df6a1cf5 100644
--- a/Documentation/features/vm/pte_special/arch-support.txt
+++ b/Documentation/features/vm/pte_special/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index a1426cabcef1..41411b0c60a3 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -344,7 +344,9 @@ struct bus_attribute {
344 344
345Declaring: 345Declaring:
346 346
347BUS_ATTR(_name, _mode, _show, _store) 347static BUS_ATTR_RW(name);
348static BUS_ATTR_RO(name);
349static BUS_ATTR_WO(name);
348 350
349Creation/Removal: 351Creation/Removal:
350 352
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 25170ad7d25b..101f2b2c69ad 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -533,16 +533,12 @@ Bridge VLAN filtering
533 function that the driver has to call for each VLAN the given port is a member 533 function that the driver has to call for each VLAN the given port is a member
534 of. A switchdev object is used to carry the VID and bridge flags. 534 of. A switchdev object is used to carry the VID and bridge flags.
535 535
536- port_fdb_prepare: bridge layer function invoked when the bridge prepares the
537 installation of a Forwarding Database entry. If the operation is not
538 supported, this function should return -EOPNOTSUPP to inform the bridge code
539 to fallback to a software implementation. No hardware setup must be done in
540 this function. See port_fdb_add for this and details.
541
542- port_fdb_add: bridge layer function invoked when the bridge wants to install a 536- port_fdb_add: bridge layer function invoked when the bridge wants to install a
543 Forwarding Database entry, the switch hardware should be programmed with the 537 Forwarding Database entry, the switch hardware should be programmed with the
544 specified address in the specified VLAN Id in the forwarding database 538 specified address in the specified VLAN Id in the forwarding database
545 associated with this VLAN ID 539 associated with this VLAN ID. If the operation is not supported, this
540 function should return -EOPNOTSUPP to inform the bridge code to fallback to
541 a software implementation.
546 542
547Note: VLAN ID 0 corresponds to the port private database, which, in the context 543Note: VLAN ID 0 corresponds to the port private database, which, in the context
548of DSA, would be the its port-based VLAN, used by the associated bridge device. 544of DSA, would be the its port-based VLAN, used by the associated bridge device.
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 6a47629ef8ed..59e86de662cd 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -11,19 +11,19 @@ Contents:
11 batman-adv 11 batman-adv
12 can 12 can
13 can_ucan_protocol 13 can_ucan_protocol
14 dpaa2/index 14 device_drivers/freescale/dpaa2/index
15 e100 15 device_drivers/intel/e100
16 e1000 16 device_drivers/intel/e1000
17 e1000e 17 device_drivers/intel/e1000e
18 fm10k 18 device_drivers/intel/fm10k
19 igb 19 device_drivers/intel/igb
20 igbvf 20 device_drivers/intel/igbvf
21 ixgb 21 device_drivers/intel/ixgb
22 ixgbe 22 device_drivers/intel/ixgbe
23 ixgbevf 23 device_drivers/intel/ixgbevf
24 i40e 24 device_drivers/intel/i40e
25 iavf 25 device_drivers/intel/iavf
26 ice 26 device_drivers/intel/ice
27 kapi 27 kapi
28 z8530book 28 z8530book
29 msg_zerocopy 29 msg_zerocopy
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index fe46d4867e2d..18c1415e7bfa 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -7,7 +7,7 @@ Intro
7===== 7=====
8 8
9The MSG_ZEROCOPY flag enables copy avoidance for socket send calls. 9The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
10The feature is currently implemented for TCP sockets. 10The feature is currently implemented for TCP and UDP sockets.
11 11
12 12
13Opportunity and Caveats 13Opportunity and Caveats
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt
index 355c6d8ef8ad..b203d1334822 100644
--- a/Documentation/networking/operstates.txt
+++ b/Documentation/networking/operstates.txt
@@ -22,8 +22,9 @@ and changeable from userspace under certain rules.
222. Querying from userspace 222. Querying from userspace
23 23
24Both admin and operational state can be queried via the netlink 24Both admin and operational state can be queried via the netlink
25operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK 25operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK
26to be notified of updates. This is important for setting from userspace. 26to be notified of updates while the interface is admin up. This is
27important for setting from userspace.
27 28
28These values contain interface state: 29These values contain interface state:
29 30
@@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to
101complete. Corresponding functions are netif_dormant_on() to set the 102complete. Corresponding functions are netif_dormant_on() to set the
102flag, netif_dormant_off() to clear it and netif_dormant() to query. 103flag, netif_dormant_off() to clear it and netif_dormant() to query.
103 104
104On device allocation, networking core sets the flags equivalent to 105On device allocation, both flags __LINK_STATE_NOCARRIER and
105netif_carrier_ok() and !netif_dormant(). 106__LINK_STATE_DORMANT are cleared, so the effective state is equivalent
107to netif_carrier_ok() and !netif_dormant().
106 108
107 109
108Whenever the driver CHANGES one of these flags, a workqueue event is 110Whenever the driver CHANGES one of these flags, a workqueue event is
@@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the
133driver. Afterwards, the userspace application can set IFLA_OPERSTATE 135driver. Afterwards, the userspace application can set IFLA_OPERSTATE
134to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set 136to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set
135netif_carrier_off() or netif_dormant_on(). Changes made by userspace 137netif_carrier_off() or netif_dormant_on(). Changes made by userspace
136are multicasted on the netlink group RTMGRP_LINK. 138are multicasted on the netlink group RTNLGRP_LINK.
137 139
138So basically a 802.1X supplicant interacts with the kernel like this: 140So basically a 802.1X supplicant interacts with the kernel like this:
139 141
140-subscribe to RTMGRP_LINK 142-subscribe to RTNLGRP_LINK
141-set IFLA_LINKMODE to 1 via RTM_SETLINK 143-set IFLA_LINKMODE to 1 via RTM_SETLINK
142-query RTM_GETLINK once to get initial state 144-query RTM_GETLINK once to get initial state
143-if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until 145-if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index c9d052e0cf51..2df5894353d6 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1000,51 +1000,6 @@ The kernel interface functions are as follows:
1000 size should be set when the call is begun. tx_total_len may not be less 1000 size should be set when the call is begun. tx_total_len may not be less
1001 than zero. 1001 than zero.
1002 1002
1003 (*) Check to see the completion state of a call so that the caller can assess
1004 whether it needs to be retried.
1005
1006 enum rxrpc_call_completion {
1007 RXRPC_CALL_SUCCEEDED,
1008 RXRPC_CALL_REMOTELY_ABORTED,
1009 RXRPC_CALL_LOCALLY_ABORTED,
1010 RXRPC_CALL_LOCAL_ERROR,
1011 RXRPC_CALL_NETWORK_ERROR,
1012 };
1013
1014 int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
1015 enum rxrpc_call_completion *_compl,
1016 u32 *_abort_code);
1017
1018 On return, -EINPROGRESS will be returned if the call is still ongoing; if
1019 it is finished, *_compl will be set to indicate the manner of completion,
1020 *_abort_code will be set to any abort code that occurred. 0 will be
1021 returned on a successful completion, -ECONNABORTED will be returned if the
1022 client failed due to a remote abort and anything else will return an
1023 appropriate error code.
1024
1025 The caller should look at this information to decide if it's worth
1026 retrying the call.
1027
1028 (*) Retry a client call.
1029
1030 int rxrpc_kernel_retry_call(struct socket *sock,
1031 struct rxrpc_call *call,
1032 struct sockaddr_rxrpc *srx,
1033 struct key *key);
1034
1035 This attempts to partially reinitialise a call and submit it again while
1036 reusing the original call's Tx queue to avoid the need to repackage and
1037 re-encrypt the data to be sent. call indicates the call to retry, srx the
1038 new address to send it to and key the encryption key to use for signing or
1039 encrypting the packets.
1040
1041 For this to work, the first Tx data packet must still be in the transmit
1042 queue, and currently this is only permitted for local and network errors
1043 and the call must not have been aborted. Any partially constructed Tx
1044 packet is left as is and can continue being filled afterwards.
1045
1046 It returns 0 if the call was requeued and an error otherwise.
1047
1048 (*) Get call RTT. 1003 (*) Get call RTT.
1049 1004
1050 u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call); 1005 u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call);
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index b0dfdaaca512..fe8f741193be 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -336,7 +336,26 @@ time client replies ACK, this socket will get another chance to move
336to the accept queue. 336to the accept queue.
337 337
338 338
339TCP Fast Open 339* TcpEstabResets
340Defined in `RFC1213 tcpEstabResets`_.
341
342.. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48
343
344* TcpAttemptFails
345Defined in `RFC1213 tcpAttemptFails`_.
346
347.. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48
348
349* TcpOutRsts
350Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates
351the 'segments sent containing the RST flag', but in linux kernel, this
352couner indicates the segments kerenl tried to send. The sending
353process might be failed due to some errors (e.g. memory alloc failed).
354
355.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52
356
357
358TCP Fast Path
340============ 359============
341When kernel receives a TCP packet, it has two paths to handler the 360When kernel receives a TCP packet, it has two paths to handler the
342packet, one is fast path, another is slow path. The comment in kernel 361packet, one is fast path, another is slow path. The comment in kernel
@@ -383,8 +402,6 @@ increase 1.
383 402
384TCP abort 403TCP abort
385======== 404========
386
387
388* TcpExtTCPAbortOnData 405* TcpExtTCPAbortOnData
389It means TCP layer has data in flight, but need to close the 406It means TCP layer has data in flight, but need to close the
390connection. So TCP layer sends a RST to the other side, indicate the 407connection. So TCP layer sends a RST to the other side, indicate the
@@ -545,7 +562,6 @@ packet yet, the sender would know packet 4 is out of order. The TCP
545stack of kernel will increase TcpExtTCPSACKReorder for both of the 562stack of kernel will increase TcpExtTCPSACKReorder for both of the
546above scenarios. 563above scenarios.
547 564
548
549DSACK 565DSACK
550===== 566=====
551The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report 567The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report
@@ -566,13 +582,63 @@ The TCP stack receives an out of order duplicate packet, so it sends a
566DSACK to the sender. 582DSACK to the sender.
567 583
568* TcpExtTCPDSACKRecv 584* TcpExtTCPDSACKRecv
569The TCP stack receives a DSACK, which indicate an acknowledged 585The TCP stack receives a DSACK, which indicates an acknowledged
570duplicate packet is received. 586duplicate packet is received.
571 587
572* TcpExtTCPDSACKOfoRecv 588* TcpExtTCPDSACKOfoRecv
573The TCP stack receives a DSACK, which indicate an out of order 589The TCP stack receives a DSACK, which indicate an out of order
574duplicate packet is received. 590duplicate packet is received.
575 591
592invalid SACK and DSACK
593====================
594When a SACK (or DSACK) block is invalid, a corresponding counter would
595be updated. The validation method is base on the start/end sequence
596number of the SACK block. For more details, please refer the comment
597of the function tcp_is_sackblock_valid in the kernel source code. A
598SACK option could have up to 4 blocks, they are checked
599individually. E.g., if 3 blocks of a SACk is invalid, the
600corresponding counter would be updated 3 times. The comment of the
601`Add counters for discarded SACK blocks`_ patch has additional
602explaination:
603
604.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
605
606* TcpExtTCPSACKDiscard
607This counter indicates how many SACK blocks are invalid. If the invalid
608SACK block is caused by ACK recording, the TCP stack will only ignore
609it and won't update this counter.
610
611* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
612When a DSACK block is invalid, one of these two counters would be
613updated. Which counter will be updated depends on the undo_marker flag
614of the TCP socket. If the undo_marker is not set, the TCP stack isn't
615likely to re-transmit any packets, and we still receive an invalid
616DSACK block, the reason might be that the packet is duplicated in the
617middle of the network. In such scenario, TcpExtTCPDSACKIgnoredNoUndo
618will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
619will be updated. As implied in its name, it might be an old packet.
620
621SACK shift
622=========
623The linux networking stack stores data in sk_buff struct (skb for
624short). If a SACK block acrosses multiple skb, the TCP stack will try
625to re-arrange data in these skb. E.g. if a SACK block acknowledges seq
62610 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and
62715 in skb2 would be moved to skb1. This operation is 'shift'. If a
628SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has
629seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
630discard, this operation is 'merge'.
631
632* TcpExtTCPSackShifted
633A skb is shifted
634
635* TcpExtTCPSackMerged
636A skb is merged
637
638* TcpExtTCPSackShiftFallback
639A skb should be shifted or merged, but the TCP stack doesn't do it for
640some reasons.
641
576TCP out of order 642TCP out of order
577=============== 643===============
578* TcpExtTCPOFOQueue 644* TcpExtTCPOFOQueue
@@ -662,6 +728,60 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_).
662.. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9 728.. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9
663.. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11 729.. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11
664 730
731TCP receive window
732==================
733* TcpExtTCPWantZeroWindowAdv
734Depending on current memory usage, the TCP stack tries to set receive
735window to zero. But the receive window might still be a non-zero
736value. For example, if the previous window size is 10, and the TCP
737stack receives 3 bytes, the current window size would be 7 even if the
738window size calculated by the memory usage is zero.
739
740* TcpExtTCPToZeroWindowAdv
741The TCP receive window is set to zero from a non-zero value.
742
743* TcpExtTCPFromZeroWindowAdv
744The TCP receive window is set to a non-zero value from zero.
745
746
747Delayed ACK
748===========
749The TCP Delayed ACK is a technique which is used for reducing the
750packet count in the network. For more details, please refer to the
751`Delayed ACK wiki`_
752
753.. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment
754
755* TcpExtDelayedACKs
756A delayed ACK timer expires. The TCP stack will send a pure ACK packet
757and exit the delayed ACK mode.
758
759* TcpExtDelayedACKLocked
760A delayed ACK timer expires, but the TCP stack can't send an ACK
761immediately because the socket is locked by a userspace program. The
762TCP stack will send a pure ACK later (after the userspace program
763unlocks the socket). When the TCP stack sends the pure ACK later, the
764TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK
765mode.
766
767* TcpExtDelayedACKLost
768It will be updated when the TCP stack receives a packet which has been
769ACKed. A Delayed ACK loss might cause this issue, but it would also be
770triggered by other reasons, such as a packet being duplicated in the
771network.
772
773Tail Loss Probe (TLP)
774=====================
775TLP is an algorithm which is used to detect TCP packet loss. For more
776details, please refer to the `TLP paper`_.
777
778.. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01
779
780* TcpExtTCPLossProbes
781A TLP probe packet is sent.
782
783* TcpExtTCPLossProbeRecovery
784A packet loss is detected and recovered by TLP.
665 785
666examples 786examples
667======= 787=======
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 82236a17b5e6..97b7ca8b9b86 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -92,11 +92,11 @@ device.
92Switch ID 92Switch ID
93^^^^^^^^^ 93^^^^^^^^^
94 94
95The switchdev driver must implement the switchdev op switchdev_port_attr_get 95The switchdev driver must implement the net_device operation
96for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same 96ndo_get_port_parent_id for each port netdev, returning the same physical ID for
97physical ID for each port of a switch. The ID must be unique between switches 97each port of a switch. The ID must be unique between switches on the same
98on the same system. The ID does not need to be unique between switches on 98system. The ID does not need to be unique between switches on different
99different systems. 99systems.
100 100
101The switch ID is used to locate ports on a switch and to know if aggregated 101The switch ID is used to locate ports on a switch and to know if aggregated
102ports belong to the same switch. 102ports belong to the same switch.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 1be0b6f9e0cb..9d1432e0aaa8 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -417,7 +417,7 @@ is again deprecated and ts[2] holds a hardware timestamp if set.
417 417
418Hardware time stamping must also be initialized for each device driver 418Hardware time stamping must also be initialized for each device driver
419that is expected to do hardware time stamping. The parameter is defined in 419that is expected to do hardware time stamping. The parameter is defined in
420/include/linux/net_tstamp.h as: 420include/uapi/linux/net_tstamp.h as:
421 421
422struct hwtstamp_config { 422struct hwtstamp_config {
423 int flags; /* no flags defined right now, must be zero */ 423 int flags; /* no flags defined right now, must be zero */
@@ -487,7 +487,7 @@ enum {
487 HWTSTAMP_FILTER_PTP_V1_L4_EVENT, 487 HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
488 488
489 /* for the complete list of values, please check 489 /* for the complete list of values, please check
490 * the include file /include/linux/net_tstamp.h 490 * the include file include/uapi/linux/net_tstamp.h
491 */ 491 */
492}; 492};
493 493
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst
index dc2ddc345044..fbb9297e6360 100644
--- a/Documentation/process/applying-patches.rst
+++ b/Documentation/process/applying-patches.rst
@@ -216,14 +216,14 @@ You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to
216generate a patch representing the differences between two patches and then 216generate a patch representing the differences between two patches and then
217apply the result. 217apply the result.
218 218
219This will let you move from something like 4.7.2 to 4.7.3 in a single 219This will let you move from something like 5.7.2 to 5.7.3 in a single
220step. The -z flag to interdiff will even let you feed it patches in gzip or 220step. The -z flag to interdiff will even let you feed it patches in gzip or
221bzip2 compressed form directly without the use of zcat or bzcat or manual 221bzip2 compressed form directly without the use of zcat or bzcat or manual
222decompression. 222decompression.
223 223
224Here's how you'd go from 4.7.2 to 4.7.3 in a single step:: 224Here's how you'd go from 5.7.2 to 5.7.3 in a single step::
225 225
226 interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1 226 interdiff -z ../patch-5.7.2.gz ../patch-5.7.3.gz | patch -p1
227 227
228Although interdiff may save you a step or two you are generally advised to 228Although interdiff may save you a step or two you are generally advised to
229do the additional steps since interdiff can get things wrong in some cases. 229do the additional steps since interdiff can get things wrong in some cases.
@@ -245,62 +245,67 @@ The patches are available at http://kernel.org/
245Most recent patches are linked from the front page, but they also have 245Most recent patches are linked from the front page, but they also have
246specific homes. 246specific homes.
247 247
248The 4.x.y (-stable) and 4.x patches live at 248The 5.x.y (-stable) and 5.x patches live at
249 249
250 https://www.kernel.org/pub/linux/kernel/v4.x/ 250 https://www.kernel.org/pub/linux/kernel/v5.x/
251 251
252The -rc patches live at 252The -rc patches are not stored on the webserver but are generated on
253demand from git tags such as
253 254
254 https://www.kernel.org/pub/linux/kernel/v4.x/testing/ 255 https://git.kernel.org/torvalds/p/v5.1-rc1/v5.0
255 256
257The stable -rc patches live at
256 258
257The 4.x kernels 259 https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/
260
261
262The 5.x kernels
258=============== 263===============
259 264
260These are the base stable releases released by Linus. The highest numbered 265These are the base stable releases released by Linus. The highest numbered
261release is the most recent. 266release is the most recent.
262 267
263If regressions or other serious flaws are found, then a -stable fix patch 268If regressions or other serious flaws are found, then a -stable fix patch
264will be released (see below) on top of this base. Once a new 4.x base 269will be released (see below) on top of this base. Once a new 5.x base
265kernel is released, a patch is made available that is a delta between the 270kernel is released, a patch is made available that is a delta between the
266previous 4.x kernel and the new one. 271previous 5.x kernel and the new one.
267 272
268To apply a patch moving from 4.6 to 4.7, you'd do the following (note 273To apply a patch moving from 5.6 to 5.7, you'd do the following (note
269that such patches do **NOT** apply on top of 4.x.y kernels but on top of the 274that such patches do **NOT** apply on top of 5.x.y kernels but on top of the
270base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to 275base 5.x kernel -- if you need to move from 5.x.y to 5.x+1 you need to
271first revert the 4.x.y patch). 276first revert the 5.x.y patch).
272 277
273Here are some examples:: 278Here are some examples::
274 279
275 # moving from 4.6 to 4.7 280 # moving from 5.6 to 5.7
276 281
277 $ cd ~/linux-4.6 # change to kernel source dir 282 $ cd ~/linux-5.6 # change to kernel source dir
278 $ patch -p1 < ../patch-4.7 # apply the 4.7 patch 283 $ patch -p1 < ../patch-5.7 # apply the 5.7 patch
279 $ cd .. 284 $ cd ..
280 $ mv linux-4.6 linux-4.7 # rename source dir 285 $ mv linux-5.6 linux-5.7 # rename source dir
281 286
282 # moving from 4.6.1 to 4.7 287 # moving from 5.6.1 to 5.7
283 288
284 $ cd ~/linux-4.6.1 # change to kernel source dir 289 $ cd ~/linux-5.6.1 # change to kernel source dir
285 $ patch -p1 -R < ../patch-4.6.1 # revert the 4.6.1 patch 290 $ patch -p1 -R < ../patch-5.6.1 # revert the 5.6.1 patch
286 # source dir is now 4.6 291 # source dir is now 5.6
287 $ patch -p1 < ../patch-4.7 # apply new 4.7 patch 292 $ patch -p1 < ../patch-5.7 # apply new 5.7 patch
288 $ cd .. 293 $ cd ..
289 $ mv linux-4.6.1 linux-4.7 # rename source dir 294 $ mv linux-5.6.1 linux-5.7 # rename source dir
290 295
291 296
292The 4.x.y kernels 297The 5.x.y kernels
293================= 298=================
294 299
295Kernels with 3-digit versions are -stable kernels. They contain small(ish) 300Kernels with 3-digit versions are -stable kernels. They contain small(ish)
296critical fixes for security problems or significant regressions discovered 301critical fixes for security problems or significant regressions discovered
297in a given 4.x kernel. 302in a given 5.x kernel.
298 303
299This is the recommended branch for users who want the most recent stable 304This is the recommended branch for users who want the most recent stable
300kernel and are not interested in helping test development/experimental 305kernel and are not interested in helping test development/experimental
301versions. 306versions.
302 307
303If no 4.x.y kernel is available, then the highest numbered 4.x kernel is 308If no 5.x.y kernel is available, then the highest numbered 5.x kernel is
304the current stable kernel. 309the current stable kernel.
305 310
306.. note:: 311.. note::
@@ -308,23 +313,23 @@ the current stable kernel.
308 The -stable team usually do make incremental patches available as well 313 The -stable team usually do make incremental patches available as well
309 as patches against the latest mainline release, but I only cover the 314 as patches against the latest mainline release, but I only cover the
310 non-incremental ones below. The incremental ones can be found at 315 non-incremental ones below. The incremental ones can be found at
311 https://www.kernel.org/pub/linux/kernel/v4.x/incr/ 316 https://www.kernel.org/pub/linux/kernel/v5.x/incr/
312 317
313These patches are not incremental, meaning that for example the 4.7.3 318These patches are not incremental, meaning that for example the 5.7.3
314patch does not apply on top of the 4.7.2 kernel source, but rather on top 319patch does not apply on top of the 5.7.2 kernel source, but rather on top
315of the base 4.7 kernel source. 320of the base 5.7 kernel source.
316 321
317So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel 322So, in order to apply the 5.7.3 patch to your existing 5.7.2 kernel
318source you have to first back out the 4.7.2 patch (so you are left with a 323source you have to first back out the 5.7.2 patch (so you are left with a
319base 4.7 kernel source) and then apply the new 4.7.3 patch. 324base 5.7 kernel source) and then apply the new 5.7.3 patch.
320 325
321Here's a small example:: 326Here's a small example::
322 327
323 $ cd ~/linux-4.7.2 # change to the kernel source dir 328 $ cd ~/linux-5.7.2 # change to the kernel source dir
324 $ patch -p1 -R < ../patch-4.7.2 # revert the 4.7.2 patch 329 $ patch -p1 -R < ../patch-5.7.2 # revert the 5.7.2 patch
325 $ patch -p1 < ../patch-4.7.3 # apply the new 4.7.3 patch 330 $ patch -p1 < ../patch-5.7.3 # apply the new 5.7.3 patch
326 $ cd .. 331 $ cd ..
327 $ mv linux-4.7.2 linux-4.7.3 # rename the kernel source dir 332 $ mv linux-5.7.2 linux-5.7.3 # rename the kernel source dir
328 333
329The -rc kernels 334The -rc kernels
330=============== 335===============
@@ -343,38 +348,38 @@ This is a good branch to run for people who want to help out testing
343development kernels but do not want to run some of the really experimental 348development kernels but do not want to run some of the really experimental
344stuff (such people should see the sections about -next and -mm kernels below). 349stuff (such people should see the sections about -next and -mm kernels below).
345 350
346The -rc patches are not incremental, they apply to a base 4.x kernel, just 351The -rc patches are not incremental, they apply to a base 5.x kernel, just
347like the 4.x.y patches described above. The kernel version before the -rcN 352like the 5.x.y patches described above. The kernel version before the -rcN
348suffix denotes the version of the kernel that this -rc kernel will eventually 353suffix denotes the version of the kernel that this -rc kernel will eventually
349turn into. 354turn into.
350 355
351So, 4.8-rc5 means that this is the fifth release candidate for the 4.8 356So, 5.8-rc5 means that this is the fifth release candidate for the 5.8
352kernel and the patch should be applied on top of the 4.7 kernel source. 357kernel and the patch should be applied on top of the 5.7 kernel source.
353 358
354Here are 3 examples of how to apply these patches:: 359Here are 3 examples of how to apply these patches::
355 360
356 # first an example of moving from 4.7 to 4.8-rc3 361 # first an example of moving from 5.7 to 5.8-rc3
357 362
358 $ cd ~/linux-4.7 # change to the 4.7 source dir 363 $ cd ~/linux-5.7 # change to the 5.7 source dir
359 $ patch -p1 < ../patch-4.8-rc3 # apply the 4.8-rc3 patch 364 $ patch -p1 < ../patch-5.8-rc3 # apply the 5.8-rc3 patch
360 $ cd .. 365 $ cd ..
361 $ mv linux-4.7 linux-4.8-rc3 # rename the source dir 366 $ mv linux-5.7 linux-5.8-rc3 # rename the source dir
362 367
363 # now let's move from 4.8-rc3 to 4.8-rc5 368 # now let's move from 5.8-rc3 to 5.8-rc5
364 369
365 $ cd ~/linux-4.8-rc3 # change to the 4.8-rc3 dir 370 $ cd ~/linux-5.8-rc3 # change to the 5.8-rc3 dir
366 $ patch -p1 -R < ../patch-4.8-rc3 # revert the 4.8-rc3 patch 371 $ patch -p1 -R < ../patch-5.8-rc3 # revert the 5.8-rc3 patch
367 $ patch -p1 < ../patch-4.8-rc5 # apply the new 4.8-rc5 patch 372 $ patch -p1 < ../patch-5.8-rc5 # apply the new 5.8-rc5 patch
368 $ cd .. 373 $ cd ..
369 $ mv linux-4.8-rc3 linux-4.8-rc5 # rename the source dir 374 $ mv linux-5.8-rc3 linux-5.8-rc5 # rename the source dir
370 375
371 # finally let's try and move from 4.7.3 to 4.8-rc5 376 # finally let's try and move from 5.7.3 to 5.8-rc5
372 377
373 $ cd ~/linux-4.7.3 # change to the kernel source dir 378 $ cd ~/linux-5.7.3 # change to the kernel source dir
374 $ patch -p1 -R < ../patch-4.7.3 # revert the 4.7.3 patch 379 $ patch -p1 -R < ../patch-5.7.3 # revert the 5.7.3 patch
375 $ patch -p1 < ../patch-4.8-rc5 # apply new 4.8-rc5 patch 380 $ patch -p1 < ../patch-5.8-rc5 # apply new 5.8-rc5 patch
376 $ cd .. 381 $ cd ..
377 $ mv linux-4.7.3 linux-4.8-rc5 # rename the kernel source dir 382 $ mv linux-5.7.3 linux-5.8-rc5 # rename the kernel source dir
378 383
379 384
380The -mm patches and the linux-next tree 385The -mm patches and the linux-next tree
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 819caf8ca05f..ebc679bcb2dc 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -56,26 +56,34 @@ of any kernel data structures.
56 56
57dentry-state: 57dentry-state:
58 58
59From linux/fs/dentry.c: 59From linux/include/linux/dcache.h:
60-------------------------------------------------------------- 60--------------------------------------------------------------
61struct { 61struct dentry_stat_t dentry_stat {
62 int nr_dentry; 62 int nr_dentry;
63 int nr_unused; 63 int nr_unused;
64 int age_limit; /* age in seconds */ 64 int age_limit; /* age in seconds */
65 int want_pages; /* pages requested by system */ 65 int want_pages; /* pages requested by system */
66 int dummy[2]; 66 int nr_negative; /* # of unused negative dentries */
67} dentry_stat = {0, 0, 45, 0,}; 67 int dummy; /* Reserved for future use */
68-------------------------------------------------------------- 68};
69 69--------------------------------------------------------------
70Dentries are dynamically allocated and deallocated, and 70
71nr_dentry seems to be 0 all the time. Hence it's safe to 71Dentries are dynamically allocated and deallocated.
72assume that only nr_unused, age_limit and want_pages are 72
73used. Nr_unused seems to be exactly what its name says. 73nr_dentry shows the total number of dentries allocated (active
74+ unused). nr_unused shows the number of dentries that are not
75actively used, but are saved in the LRU list for future reuse.
76
74Age_limit is the age in seconds after which dcache entries 77Age_limit is the age in seconds after which dcache entries
75can be reclaimed when memory is short and want_pages is 78can be reclaimed when memory is short and want_pages is
76nonzero when shrink_dcache_pages() has been called and the 79nonzero when shrink_dcache_pages() has been called and the
77dcache isn't pruned yet. 80dcache isn't pruned yet.
78 81
82nr_negative shows the number of unused dentries that are also
83negative dentries which do not map to any files. Instead,
84they help speeding up rejection of non-existing files provided
85by the users.
86
79============================================================== 87==============================================================
80 88
81dquot-max & dquot-nr: 89dquot-max & dquot-nr:
diff --git a/Documentation/trace/coresight-cpu-debug.txt b/Documentation/trace/coresight-cpu-debug.txt
index 89ab09e78e8d..f07e38094b40 100644
--- a/Documentation/trace/coresight-cpu-debug.txt
+++ b/Documentation/trace/coresight-cpu-debug.txt
@@ -165,7 +165,7 @@ Do some work...
165The same can also be done from an application program. 165The same can also be done from an application program.
166 166
167Disable specific CPU's specific idle state from cpuidle sysfs (see 167Disable specific CPU's specific idle state from cpuidle sysfs (see
168Documentation/cpuidle/sysfs.txt): 168Documentation/admin-guide/pm/cpuidle.rst):
169# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable 169# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
170 170
171 171
diff --git a/Documentation/translations/it_IT/admin-guide/README.rst b/Documentation/translations/it_IT/admin-guide/README.rst
index 80f5ffc94a9e..b37166817842 100644
--- a/Documentation/translations/it_IT/admin-guide/README.rst
+++ b/Documentation/translations/it_IT/admin-guide/README.rst
@@ -4,7 +4,7 @@
4 4
5.. _it_readme: 5.. _it_readme:
6 6
7Rilascio del kernel Linux 4.x <http://kernel.org/> 7Rilascio del kernel Linux 5.x <http://kernel.org/>
8=================================================== 8===================================================
9 9
10.. warning:: 10.. warning::
diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst
index 71d6d257074f..659bbc093b52 100644
--- a/Documentation/virtual/kvm/amd-memory-encryption.rst
+++ b/Documentation/virtual/kvm/amd-memory-encryption.rst
@@ -242,6 +242,6 @@ References
242========== 242==========
243 243
244.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf 244.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf
245.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf 245.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM_API_Specification.pdf
246.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34) 246.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34)
247.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf 247.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf
diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.txt
index d9aed8303984..c1f95b59e14d 100644
--- a/Documentation/x86/resctrl_ui.txt
+++ b/Documentation/x86/resctrl_ui.txt
@@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com>
9Tony Luck <tony.luck@intel.com> 9Tony Luck <tony.luck@intel.com>
10Vikas Shivappa <vikas.shivappa@intel.com> 10Vikas Shivappa <vikas.shivappa@intel.com>
11 11
12This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo 12This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo
13flag bits: 13flag bits:
14RDT (Resource Director Technology) Allocation - "rdt_a" 14RDT (Resource Director Technology) Allocation - "rdt_a"
15CAT (Cache Allocation Technology) - "cat_l3", "cat_l2" 15CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
diff --git a/MAINTAINERS b/MAINTAINERS
index 32d444476a90..dce5c099f43c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -409,8 +409,7 @@ F: drivers/platform/x86/wmi.c
409F: include/uapi/linux/wmi.h 409F: include/uapi/linux/wmi.h
410 410
411AD1889 ALSA SOUND DRIVER 411AD1889 ALSA SOUND DRIVER
412M: Thibaut Varene <T-Bone@parisc-linux.org> 412W: https://parisc.wiki.kernel.org/index.php/AD1889
413W: http://wiki.parisc-linux.org/AD1889
414L: linux-parisc@vger.kernel.org 413L: linux-parisc@vger.kernel.org
415S: Maintained 414S: Maintained
416F: sound/pci/ad1889.* 415F: sound/pci/ad1889.*
@@ -2848,8 +2847,11 @@ F: include/uapi/linux/if_bonding.h
2848BPF (Safe dynamic programs and tools) 2847BPF (Safe dynamic programs and tools)
2849M: Alexei Starovoitov <ast@kernel.org> 2848M: Alexei Starovoitov <ast@kernel.org>
2850M: Daniel Borkmann <daniel@iogearbox.net> 2849M: Daniel Borkmann <daniel@iogearbox.net>
2850R: Martin KaFai Lau <kafai@fb.com>
2851R: Song Liu <songliubraving@fb.com>
2852R: Yonghong Song <yhs@fb.com>
2851L: netdev@vger.kernel.org 2853L: netdev@vger.kernel.org
2852L: linux-kernel@vger.kernel.org 2854L: bpf@vger.kernel.org
2853T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git 2855T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
2854T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git 2856T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
2855Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 2857Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
@@ -2873,10 +2875,13 @@ F: samples/bpf/
2873F: tools/bpf/ 2875F: tools/bpf/
2874F: tools/lib/bpf/ 2876F: tools/lib/bpf/
2875F: tools/testing/selftests/bpf/ 2877F: tools/testing/selftests/bpf/
2878K: bpf
2879N: bpf
2876 2880
2877BPF JIT for ARM 2881BPF JIT for ARM
2878M: Shubham Bansal <illusionist.neo@gmail.com> 2882M: Shubham Bansal <illusionist.neo@gmail.com>
2879L: netdev@vger.kernel.org 2883L: netdev@vger.kernel.org
2884L: bpf@vger.kernel.org
2880S: Maintained 2885S: Maintained
2881F: arch/arm/net/ 2886F: arch/arm/net/
2882 2887
@@ -2885,18 +2890,21 @@ M: Daniel Borkmann <daniel@iogearbox.net>
2885M: Alexei Starovoitov <ast@kernel.org> 2890M: Alexei Starovoitov <ast@kernel.org>
2886M: Zi Shen Lim <zlim.lnx@gmail.com> 2891M: Zi Shen Lim <zlim.lnx@gmail.com>
2887L: netdev@vger.kernel.org 2892L: netdev@vger.kernel.org
2893L: bpf@vger.kernel.org
2888S: Supported 2894S: Supported
2889F: arch/arm64/net/ 2895F: arch/arm64/net/
2890 2896
2891BPF JIT for MIPS (32-BIT AND 64-BIT) 2897BPF JIT for MIPS (32-BIT AND 64-BIT)
2892M: Paul Burton <paul.burton@mips.com> 2898M: Paul Burton <paul.burton@mips.com>
2893L: netdev@vger.kernel.org 2899L: netdev@vger.kernel.org
2900L: bpf@vger.kernel.org
2894S: Maintained 2901S: Maintained
2895F: arch/mips/net/ 2902F: arch/mips/net/
2896 2903
2897BPF JIT for NFP NICs 2904BPF JIT for NFP NICs
2898M: Jakub Kicinski <jakub.kicinski@netronome.com> 2905M: Jakub Kicinski <jakub.kicinski@netronome.com>
2899L: netdev@vger.kernel.org 2906L: netdev@vger.kernel.org
2907L: bpf@vger.kernel.org
2900S: Supported 2908S: Supported
2901F: drivers/net/ethernet/netronome/nfp/bpf/ 2909F: drivers/net/ethernet/netronome/nfp/bpf/
2902 2910
@@ -2904,6 +2912,7 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT)
2904M: Naveen N. Rao <naveen.n.rao@linux.ibm.com> 2912M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
2905M: Sandipan Das <sandipan@linux.ibm.com> 2913M: Sandipan Das <sandipan@linux.ibm.com>
2906L: netdev@vger.kernel.org 2914L: netdev@vger.kernel.org
2915L: bpf@vger.kernel.org
2907S: Maintained 2916S: Maintained
2908F: arch/powerpc/net/ 2917F: arch/powerpc/net/
2909 2918
@@ -2911,6 +2920,7 @@ BPF JIT for S390
2911M: Martin Schwidefsky <schwidefsky@de.ibm.com> 2920M: Martin Schwidefsky <schwidefsky@de.ibm.com>
2912M: Heiko Carstens <heiko.carstens@de.ibm.com> 2921M: Heiko Carstens <heiko.carstens@de.ibm.com>
2913L: netdev@vger.kernel.org 2922L: netdev@vger.kernel.org
2923L: bpf@vger.kernel.org
2914S: Maintained 2924S: Maintained
2915F: arch/s390/net/ 2925F: arch/s390/net/
2916X: arch/s390/net/pnet.c 2926X: arch/s390/net/pnet.c
@@ -2918,12 +2928,14 @@ X: arch/s390/net/pnet.c
2918BPF JIT for SPARC (32-BIT AND 64-BIT) 2928BPF JIT for SPARC (32-BIT AND 64-BIT)
2919M: David S. Miller <davem@davemloft.net> 2929M: David S. Miller <davem@davemloft.net>
2920L: netdev@vger.kernel.org 2930L: netdev@vger.kernel.org
2931L: bpf@vger.kernel.org
2921S: Maintained 2932S: Maintained
2922F: arch/sparc/net/ 2933F: arch/sparc/net/
2923 2934
2924BPF JIT for X86 32-BIT 2935BPF JIT for X86 32-BIT
2925M: Wang YanQing <udknight@gmail.com> 2936M: Wang YanQing <udknight@gmail.com>
2926L: netdev@vger.kernel.org 2937L: netdev@vger.kernel.org
2938L: bpf@vger.kernel.org
2927S: Maintained 2939S: Maintained
2928F: arch/x86/net/bpf_jit_comp32.c 2940F: arch/x86/net/bpf_jit_comp32.c
2929 2941
@@ -2931,6 +2943,7 @@ BPF JIT for X86 64-BIT
2931M: Alexei Starovoitov <ast@kernel.org> 2943M: Alexei Starovoitov <ast@kernel.org>
2932M: Daniel Borkmann <daniel@iogearbox.net> 2944M: Daniel Borkmann <daniel@iogearbox.net>
2933L: netdev@vger.kernel.org 2945L: netdev@vger.kernel.org
2946L: bpf@vger.kernel.org
2934S: Supported 2947S: Supported
2935F: arch/x86/net/ 2948F: arch/x86/net/
2936X: arch/x86/net/bpf_jit_comp32.c 2949X: arch/x86/net/bpf_jit_comp32.c
@@ -3052,8 +3065,8 @@ F: include/linux/bcm963xx_nvram.h
3052F: include/linux/bcm963xx_tag.h 3065F: include/linux/bcm963xx_tag.h
3053 3066
3054BROADCOM BNX2 GIGABIT ETHERNET DRIVER 3067BROADCOM BNX2 GIGABIT ETHERNET DRIVER
3055M: Rasesh Mody <rasesh.mody@cavium.com> 3068M: Rasesh Mody <rmody@marvell.com>
3056M: Dept-GELinuxNICDev@cavium.com 3069M: GR-Linux-NIC-Dev@marvell.com
3057L: netdev@vger.kernel.org 3070L: netdev@vger.kernel.org
3058S: Supported 3071S: Supported
3059F: drivers/net/ethernet/broadcom/bnx2.* 3072F: drivers/net/ethernet/broadcom/bnx2.*
@@ -3072,9 +3085,9 @@ S: Supported
3072F: drivers/scsi/bnx2i/ 3085F: drivers/scsi/bnx2i/
3073 3086
3074BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER 3087BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
3075M: Ariel Elior <ariel.elior@cavium.com> 3088M: Ariel Elior <aelior@marvell.com>
3076M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com> 3089M: Sudarsana Kalluru <skalluru@marvell.com>
3077M: everest-linux-l2@cavium.com 3090M: GR-everest-linux-l2@marvell.com
3078L: netdev@vger.kernel.org 3091L: netdev@vger.kernel.org
3079S: Supported 3092S: Supported
3080F: drivers/net/ethernet/broadcom/bnx2x/ 3093F: drivers/net/ethernet/broadcom/bnx2x/
@@ -3249,9 +3262,9 @@ S: Supported
3249F: drivers/scsi/bfa/ 3262F: drivers/scsi/bfa/
3250 3263
3251BROCADE BNA 10 GIGABIT ETHERNET DRIVER 3264BROCADE BNA 10 GIGABIT ETHERNET DRIVER
3252M: Rasesh Mody <rasesh.mody@cavium.com> 3265M: Rasesh Mody <rmody@marvell.com>
3253M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com> 3266M: Sudarsana Kalluru <skalluru@marvell.com>
3254M: Dept-GELinuxNICDev@cavium.com 3267M: GR-Linux-NIC-Dev@marvell.com
3255L: netdev@vger.kernel.org 3268L: netdev@vger.kernel.org
3256S: Supported 3269S: Supported
3257F: drivers/net/ethernet/brocade/bna/ 3270F: drivers/net/ethernet/brocade/bna/
@@ -3385,9 +3398,8 @@ F: Documentation/media/v4l-drivers/cafe_ccic*
3385F: drivers/media/platform/marvell-ccic/ 3398F: drivers/media/platform/marvell-ccic/
3386 3399
3387CAIF NETWORK LAYER 3400CAIF NETWORK LAYER
3388M: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
3389L: netdev@vger.kernel.org 3401L: netdev@vger.kernel.org
3390S: Supported 3402S: Orphan
3391F: Documentation/networking/caif/ 3403F: Documentation/networking/caif/
3392F: drivers/net/caif/ 3404F: drivers/net/caif/
3393F: include/uapi/linux/caif/ 3405F: include/uapi/linux/caif/
@@ -3471,10 +3483,9 @@ F: drivers/i2c/busses/i2c-octeon*
3471F: drivers/i2c/busses/i2c-thunderx* 3483F: drivers/i2c/busses/i2c-thunderx*
3472 3484
3473CAVIUM LIQUIDIO NETWORK DRIVER 3485CAVIUM LIQUIDIO NETWORK DRIVER
3474M: Derek Chickles <derek.chickles@caviumnetworks.com> 3486M: Derek Chickles <dchickles@marvell.com>
3475M: Satanand Burla <satananda.burla@caviumnetworks.com> 3487M: Satanand Burla <sburla@marvell.com>
3476M: Felix Manlunas <felix.manlunas@caviumnetworks.com> 3488M: Felix Manlunas <fmanlunas@marvell.com>
3477M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
3478L: netdev@vger.kernel.org 3489L: netdev@vger.kernel.org
3479W: http://www.cavium.com 3490W: http://www.cavium.com
3480S: Supported 3491S: Supported
@@ -3951,7 +3962,7 @@ L: netdev@vger.kernel.org
3951S: Maintained 3962S: Maintained
3952F: drivers/net/ethernet/ti/cpmac.c 3963F: drivers/net/ethernet/ti/cpmac.c
3953 3964
3954CPU FREQUENCY DRIVERS 3965CPU FREQUENCY SCALING FRAMEWORK
3955M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 3966M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
3956M: Viresh Kumar <viresh.kumar@linaro.org> 3967M: Viresh Kumar <viresh.kumar@linaro.org>
3957L: linux-pm@vger.kernel.org 3968L: linux-pm@vger.kernel.org
@@ -3959,6 +3970,8 @@ S: Maintained
3959T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 3970T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
3960T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates) 3971T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
3961B: https://bugzilla.kernel.org 3972B: https://bugzilla.kernel.org
3973F: Documentation/admin-guide/pm/cpufreq.rst
3974F: Documentation/admin-guide/pm/intel_pstate.rst
3962F: Documentation/cpu-freq/ 3975F: Documentation/cpu-freq/
3963F: Documentation/devicetree/bindings/cpufreq/ 3976F: Documentation/devicetree/bindings/cpufreq/
3964F: drivers/cpufreq/ 3977F: drivers/cpufreq/
@@ -3977,6 +3990,7 @@ F: drivers/cpufreq/arm_big_little.c
3977CPU POWER MONITORING SUBSYSTEM 3990CPU POWER MONITORING SUBSYSTEM
3978M: Thomas Renninger <trenn@suse.com> 3991M: Thomas Renninger <trenn@suse.com>
3979M: Shuah Khan <shuah@kernel.org> 3992M: Shuah Khan <shuah@kernel.org>
3993M: Shuah Khan <skhan@linuxfoundation.org>
3980L: linux-pm@vger.kernel.org 3994L: linux-pm@vger.kernel.org
3981S: Maintained 3995S: Maintained
3982F: tools/power/cpupower/ 3996F: tools/power/cpupower/
@@ -4006,13 +4020,14 @@ S: Supported
4006F: drivers/cpuidle/cpuidle-exynos.c 4020F: drivers/cpuidle/cpuidle-exynos.c
4007F: arch/arm/mach-exynos/pm.c 4021F: arch/arm/mach-exynos/pm.c
4008 4022
4009CPUIDLE DRIVERS 4023CPU IDLE TIME MANAGEMENT FRAMEWORK
4010M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 4024M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
4011M: Daniel Lezcano <daniel.lezcano@linaro.org> 4025M: Daniel Lezcano <daniel.lezcano@linaro.org>
4012L: linux-pm@vger.kernel.org 4026L: linux-pm@vger.kernel.org
4013S: Maintained 4027S: Maintained
4014T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 4028T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
4015B: https://bugzilla.kernel.org 4029B: https://bugzilla.kernel.org
4030F: Documentation/admin-guide/pm/cpuidle.rst
4016F: drivers/cpuidle/* 4031F: drivers/cpuidle/*
4017F: include/linux/cpuidle.h 4032F: include/linux/cpuidle.h
4018 4033
@@ -5178,7 +5193,7 @@ DRM DRIVERS FOR XEN
5178M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> 5193M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
5179T: git git://anongit.freedesktop.org/drm/drm-misc 5194T: git git://anongit.freedesktop.org/drm/drm-misc
5180L: dri-devel@lists.freedesktop.org 5195L: dri-devel@lists.freedesktop.org
5181L: xen-devel@lists.xen.org 5196L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
5182S: Supported 5197S: Supported
5183F: drivers/gpu/drm/xen/ 5198F: drivers/gpu/drm/xen/
5184F: Documentation/gpu/xen-front.rst 5199F: Documentation/gpu/xen-front.rst
@@ -6143,7 +6158,7 @@ FREESCALE SOC SOUND DRIVERS
6143M: Timur Tabi <timur@kernel.org> 6158M: Timur Tabi <timur@kernel.org>
6144M: Nicolin Chen <nicoleotsuka@gmail.com> 6159M: Nicolin Chen <nicoleotsuka@gmail.com>
6145M: Xiubo Li <Xiubo.Lee@gmail.com> 6160M: Xiubo Li <Xiubo.Lee@gmail.com>
6146R: Fabio Estevam <fabio.estevam@nxp.com> 6161R: Fabio Estevam <festevam@gmail.com>
6147L: alsa-devel@alsa-project.org (moderated for non-subscribers) 6162L: alsa-devel@alsa-project.org (moderated for non-subscribers)
6148L: linuxppc-dev@lists.ozlabs.org 6163L: linuxppc-dev@lists.ozlabs.org
6149S: Maintained 6164S: Maintained
@@ -8256,6 +8271,7 @@ F: include/uapi/linux/sunrpc/
8256 8271
8257KERNEL SELFTEST FRAMEWORK 8272KERNEL SELFTEST FRAMEWORK
8258M: Shuah Khan <shuah@kernel.org> 8273M: Shuah Khan <shuah@kernel.org>
8274M: Shuah Khan <skhan@linuxfoundation.org>
8259L: linux-kselftest@vger.kernel.org 8275L: linux-kselftest@vger.kernel.org
8260T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git 8276T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
8261Q: https://patchwork.kernel.org/project/linux-kselftest/list/ 8277Q: https://patchwork.kernel.org/project/linux-kselftest/list/
@@ -8478,6 +8494,7 @@ L7 BPF FRAMEWORK
8478M: John Fastabend <john.fastabend@gmail.com> 8494M: John Fastabend <john.fastabend@gmail.com>
8479M: Daniel Borkmann <daniel@iogearbox.net> 8495M: Daniel Borkmann <daniel@iogearbox.net>
8480L: netdev@vger.kernel.org 8496L: netdev@vger.kernel.org
8497L: bpf@vger.kernel.org
8481S: Maintained 8498S: Maintained
8482F: include/linux/skmsg.h 8499F: include/linux/skmsg.h
8483F: net/core/skmsg.c 8500F: net/core/skmsg.c
@@ -10686,9 +10703,9 @@ S: Maintained
10686F: drivers/net/netdevsim/* 10703F: drivers/net/netdevsim/*
10687 10704
10688NETXEN (1/10) GbE SUPPORT 10705NETXEN (1/10) GbE SUPPORT
10689M: Manish Chopra <manish.chopra@cavium.com> 10706M: Manish Chopra <manishc@marvell.com>
10690M: Rahul Verma <rahul.verma@cavium.com> 10707M: Rahul Verma <rahulv@marvell.com>
10691M: Dept-GELinuxNICDev@cavium.com 10708M: GR-Linux-NIC-Dev@marvell.com
10692L: netdev@vger.kernel.org 10709L: netdev@vger.kernel.org
10693S: Supported 10710S: Supported
10694F: drivers/net/ethernet/qlogic/netxen/ 10711F: drivers/net/ethernet/qlogic/netxen/
@@ -10889,7 +10906,7 @@ F: include/linux/nvmem-consumer.h
10889F: include/linux/nvmem-provider.h 10906F: include/linux/nvmem-provider.h
10890 10907
10891NXP SGTL5000 DRIVER 10908NXP SGTL5000 DRIVER
10892M: Fabio Estevam <fabio.estevam@nxp.com> 10909M: Fabio Estevam <festevam@gmail.com>
10893L: alsa-devel@alsa-project.org (moderated for non-subscribers) 10910L: alsa-devel@alsa-project.org (moderated for non-subscribers)
10894S: Maintained 10911S: Maintained
10895F: Documentation/devicetree/bindings/sound/sgtl5000.txt 10912F: Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -11303,10 +11320,12 @@ F: include/dt-bindings/
11303 11320
11304OPENCORES I2C BUS DRIVER 11321OPENCORES I2C BUS DRIVER
11305M: Peter Korsgaard <peter@korsgaard.com> 11322M: Peter Korsgaard <peter@korsgaard.com>
11323M: Andrew Lunn <andrew@lunn.ch>
11306L: linux-i2c@vger.kernel.org 11324L: linux-i2c@vger.kernel.org
11307S: Maintained 11325S: Maintained
11308F: Documentation/i2c/busses/i2c-ocores 11326F: Documentation/i2c/busses/i2c-ocores
11309F: drivers/i2c/busses/i2c-ocores.c 11327F: drivers/i2c/busses/i2c-ocores.c
11328F: include/linux/platform_data/i2c-ocores.h
11310 11329
11311OPENRISC ARCHITECTURE 11330OPENRISC ARCHITECTURE
11312M: Jonas Bonn <jonas@southpole.se> 11331M: Jonas Bonn <jonas@southpole.se>
@@ -11477,7 +11496,7 @@ F: Documentation/blockdev/paride.txt
11477F: drivers/block/paride/ 11496F: drivers/block/paride/
11478 11497
11479PARISC ARCHITECTURE 11498PARISC ARCHITECTURE
11480M: "James E.J. Bottomley" <jejb@parisc-linux.org> 11499M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
11481M: Helge Deller <deller@gmx.de> 11500M: Helge Deller <deller@gmx.de>
11482L: linux-parisc@vger.kernel.org 11501L: linux-parisc@vger.kernel.org
11483W: http://www.parisc-linux.org/ 11502W: http://www.parisc-linux.org/
@@ -12472,8 +12491,8 @@ S: Supported
12472F: drivers/scsi/qedi/ 12491F: drivers/scsi/qedi/
12473 12492
12474QLOGIC QL4xxx ETHERNET DRIVER 12493QLOGIC QL4xxx ETHERNET DRIVER
12475M: Ariel Elior <Ariel.Elior@cavium.com> 12494M: Ariel Elior <aelior@marvell.com>
12476M: everest-linux-l2@cavium.com 12495M: GR-everest-linux-l2@marvell.com
12477L: netdev@vger.kernel.org 12496L: netdev@vger.kernel.org
12478S: Supported 12497S: Supported
12479F: drivers/net/ethernet/qlogic/qed/ 12498F: drivers/net/ethernet/qlogic/qed/
@@ -12481,8 +12500,8 @@ F: include/linux/qed/
12481F: drivers/net/ethernet/qlogic/qede/ 12500F: drivers/net/ethernet/qlogic/qede/
12482 12501
12483QLOGIC QL4xxx RDMA DRIVER 12502QLOGIC QL4xxx RDMA DRIVER
12484M: Michal Kalderon <Michal.Kalderon@cavium.com> 12503M: Michal Kalderon <mkalderon@marvell.com>
12485M: Ariel Elior <Ariel.Elior@cavium.com> 12504M: Ariel Elior <aelior@marvell.com>
12486L: linux-rdma@vger.kernel.org 12505L: linux-rdma@vger.kernel.org
12487S: Supported 12506S: Supported
12488F: drivers/infiniband/hw/qedr/ 12507F: drivers/infiniband/hw/qedr/
@@ -12502,7 +12521,7 @@ F: Documentation/scsi/LICENSE.qla2xxx
12502F: drivers/scsi/qla2xxx/ 12521F: drivers/scsi/qla2xxx/
12503 12522
12504QLOGIC QLA3XXX NETWORK DRIVER 12523QLOGIC QLA3XXX NETWORK DRIVER
12505M: Dept-GELinuxNICDev@cavium.com 12524M: GR-Linux-NIC-Dev@marvell.com
12506L: netdev@vger.kernel.org 12525L: netdev@vger.kernel.org
12507S: Supported 12526S: Supported
12508F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx 12527F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
@@ -12516,16 +12535,16 @@ F: Documentation/scsi/LICENSE.qla4xxx
12516F: drivers/scsi/qla4xxx/ 12535F: drivers/scsi/qla4xxx/
12517 12536
12518QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER 12537QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
12519M: Shahed Shaikh <Shahed.Shaikh@cavium.com> 12538M: Shahed Shaikh <shshaikh@marvell.com>
12520M: Manish Chopra <manish.chopra@cavium.com> 12539M: Manish Chopra <manishc@marvell.com>
12521M: Dept-GELinuxNICDev@cavium.com 12540M: GR-Linux-NIC-Dev@marvell.com
12522L: netdev@vger.kernel.org 12541L: netdev@vger.kernel.org
12523S: Supported 12542S: Supported
12524F: drivers/net/ethernet/qlogic/qlcnic/ 12543F: drivers/net/ethernet/qlogic/qlcnic/
12525 12544
12526QLOGIC QLGE 10Gb ETHERNET DRIVER 12545QLOGIC QLGE 10Gb ETHERNET DRIVER
12527M: Manish Chopra <manish.chopra@cavium.com> 12546M: Manish Chopra <manishc@marvell.com>
12528M: Dept-GELinuxNICDev@cavium.com 12547M: GR-Linux-NIC-Dev@marvell.com
12529L: netdev@vger.kernel.org 12548L: netdev@vger.kernel.org
12530S: Supported 12549S: Supported
12531F: drivers/net/ethernet/qlogic/qlge/ 12550F: drivers/net/ethernet/qlogic/qlge/
@@ -12864,6 +12883,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
12864F: drivers/net/dsa/realtek-smi* 12883F: drivers/net/dsa/realtek-smi*
12865F: drivers/net/dsa/rtl83* 12884F: drivers/net/dsa/rtl83*
12866 12885
12886REDPINE WIRELESS DRIVER
12887M: Amitkumar Karwar <amitkarwar@gmail.com>
12888M: Siva Rebbagondla <siva8118@gmail.com>
12889L: linux-wireless@vger.kernel.org
12890S: Maintained
12891F: drivers/net/wireless/rsi/
12892
12867REGISTER MAP ABSTRACTION 12893REGISTER MAP ABSTRACTION
12868M: Mark Brown <broonie@kernel.org> 12894M: Mark Brown <broonie@kernel.org>
12869L: linux-kernel@vger.kernel.org 12895L: linux-kernel@vger.kernel.org
@@ -13692,6 +13718,15 @@ L: netdev@vger.kernel.org
13692S: Supported 13718S: Supported
13693F: drivers/net/ethernet/sfc/ 13719F: drivers/net/ethernet/sfc/
13694 13720
13721SFF/SFP/SFP+ MODULE SUPPORT
13722M: Russell King <linux@armlinux.org.uk>
13723L: netdev@vger.kernel.org
13724S: Maintained
13725F: drivers/net/phy/phylink.c
13726F: drivers/net/phy/sfp*
13727F: include/linux/phylink.h
13728F: include/linux/sfp.h
13729
13695SGI GRU DRIVER 13730SGI GRU DRIVER
13696M: Dimitri Sivanich <sivanich@sgi.com> 13731M: Dimitri Sivanich <sivanich@sgi.com>
13697S: Maintained 13732S: Maintained
@@ -13820,8 +13855,9 @@ F: drivers/media/mmc/siano/
13820 13855
13821SIFIVE DRIVERS 13856SIFIVE DRIVERS
13822M: Palmer Dabbelt <palmer@sifive.com> 13857M: Palmer Dabbelt <palmer@sifive.com>
13858M: Paul Walmsley <paul.walmsley@sifive.com>
13823L: linux-riscv@lists.infradead.org 13859L: linux-riscv@lists.infradead.org
13824T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git 13860T: git git://github.com/sifive/riscv-linux.git
13825S: Supported 13861S: Supported
13826K: sifive 13862K: sifive
13827N: sifive 13863N: sifive
@@ -14432,6 +14468,11 @@ M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
14432S: Odd Fixes 14468S: Odd Fixes
14433F: drivers/staging/rtl8712/ 14469F: drivers/staging/rtl8712/
14434 14470
14471STAGING - REALTEK RTL8188EU DRIVERS
14472M: Larry Finger <Larry.Finger@lwfinger.net>
14473S: Odd Fixes
14474F: drivers/staging/rtl8188eu/
14475
14435STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER 14476STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
14436M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> 14477M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
14437M: Teddy Wang <teddy.wang@siliconmotion.com> 14478M: Teddy Wang <teddy.wang@siliconmotion.com>
@@ -15802,7 +15843,6 @@ M: Alan Stern <stern@rowland.harvard.edu>
15802L: linux-usb@vger.kernel.org 15843L: linux-usb@vger.kernel.org
15803L: usb-storage@lists.one-eyed-alien.net 15844L: usb-storage@lists.one-eyed-alien.net
15804S: Maintained 15845S: Maintained
15805W: http://www.one-eyed-alien.net/~mdharm/linux-usb/
15806F: drivers/usb/storage/ 15846F: drivers/usb/storage/
15807 15847
15808USB MIDI DRIVER 15848USB MIDI DRIVER
@@ -15834,6 +15874,7 @@ F: drivers/usb/common/usb-otg-fsm.c
15834USB OVER IP DRIVER 15874USB OVER IP DRIVER
15835M: Valentina Manea <valentina.manea.m@gmail.com> 15875M: Valentina Manea <valentina.manea.m@gmail.com>
15836M: Shuah Khan <shuah@kernel.org> 15876M: Shuah Khan <shuah@kernel.org>
15877M: Shuah Khan <skhan@linuxfoundation.org>
15837L: linux-usb@vger.kernel.org 15878L: linux-usb@vger.kernel.org
15838S: Maintained 15879S: Maintained
15839F: Documentation/usb/usbip_protocol.txt 15880F: Documentation/usb/usbip_protocol.txt
@@ -16631,6 +16672,15 @@ S: Maintained
16631F: drivers/platform/x86/ 16672F: drivers/platform/x86/
16632F: drivers/platform/olpc/ 16673F: drivers/platform/olpc/
16633 16674
16675X86 PLATFORM DRIVERS - ARCH
16676R: Darren Hart <dvhart@infradead.org>
16677R: Andy Shevchenko <andy@infradead.org>
16678L: platform-driver-x86@vger.kernel.org
16679L: x86@kernel.org
16680T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
16681S: Maintained
16682F: arch/x86/platform
16683
16634X86 VDSO 16684X86 VDSO
16635M: Andy Lutomirski <luto@kernel.org> 16685M: Andy Lutomirski <luto@kernel.org>
16636L: linux-kernel@vger.kernel.org 16686L: linux-kernel@vger.kernel.org
@@ -16663,10 +16713,30 @@ T: git git://linuxtv.org/media_tree.git
16663S: Maintained 16713S: Maintained
16664F: drivers/media/tuners/tuner-xc2028.* 16714F: drivers/media/tuners/tuner-xc2028.*
16665 16715
16716XDP (eXpress Data Path)
16717M: Alexei Starovoitov <ast@kernel.org>
16718M: Daniel Borkmann <daniel@iogearbox.net>
16719M: David S. Miller <davem@davemloft.net>
16720M: Jakub Kicinski <jakub.kicinski@netronome.com>
16721M: Jesper Dangaard Brouer <hawk@kernel.org>
16722M: John Fastabend <john.fastabend@gmail.com>
16723L: netdev@vger.kernel.org
16724L: xdp-newbies@vger.kernel.org
16725L: bpf@vger.kernel.org
16726S: Supported
16727F: net/core/xdp.c
16728F: include/net/xdp.h
16729F: kernel/bpf/devmap.c
16730F: kernel/bpf/cpumap.c
16731F: include/trace/events/xdp.h
16732K: xdp
16733N: xdp
16734
16666XDP SOCKETS (AF_XDP) 16735XDP SOCKETS (AF_XDP)
16667M: Björn Töpel <bjorn.topel@intel.com> 16736M: Björn Töpel <bjorn.topel@intel.com>
16668M: Magnus Karlsson <magnus.karlsson@intel.com> 16737M: Magnus Karlsson <magnus.karlsson@intel.com>
16669L: netdev@vger.kernel.org 16738L: netdev@vger.kernel.org
16739L: bpf@vger.kernel.org
16670S: Maintained 16740S: Maintained
16671F: kernel/bpf/xskmap.c 16741F: kernel/bpf/xskmap.c
16672F: net/xdp/ 16742F: net/xdp/
diff --git a/Makefile b/Makefile
index 8c55b6404e19..ac5ac28a24e9 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 5 2VERSION = 5
3PATCHLEVEL = 0 3PATCHLEVEL = 0
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc1 5EXTRAVERSION = -rc8
6NAME = Shy Crocodile 6NAME = Shy Crocodile
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION
955 endif 955 endif
956endif 956endif
957 957
958PHONY += prepare0
958 959
959ifeq ($(KBUILD_EXTMOD),) 960ifeq ($(KBUILD_EXTMOD),)
960core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ 961core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
@@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc
1061# archprepare is used in arch Makefiles and when processed asm symlink, 1062# archprepare is used in arch Makefiles and when processed asm symlink,
1062# version.h and scripts_basic is processed / created. 1063# version.h and scripts_basic is processed / created.
1063 1064
1064# Listed in dependency order 1065PHONY += prepare archprepare prepare1 prepare2 prepare3
1065PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
1066 1066
1067# prepare3 is used to check if we are building in a separate output directory, 1067# prepare3 is used to check if we are building in a separate output directory,
1068# and if so do: 1068# and if so do:
@@ -1360,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS))
1360mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) 1360mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
1361mrproper-dirs := $(addprefix _mrproper_,scripts) 1361mrproper-dirs := $(addprefix _mrproper_,scripts)
1362 1362
1363PHONY += $(mrproper-dirs) mrproper archmrproper 1363PHONY += $(mrproper-dirs) mrproper
1364$(mrproper-dirs): 1364$(mrproper-dirs):
1365 $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) 1365 $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
1366 1366
1367mrproper: clean archmrproper $(mrproper-dirs) 1367mrproper: clean $(mrproper-dirs)
1368 $(call cmd,rmdirs) 1368 $(call cmd,rmdirs)
1369 $(call cmd,rmfiles) 1369 $(call cmd,rmfiles)
1370 1370
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 4d17cacd1462..432402c8e47f 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -56,15 +56,15 @@
56 56
57#elif defined(CONFIG_ALPHA_DP264) || \ 57#elif defined(CONFIG_ALPHA_DP264) || \
58 defined(CONFIG_ALPHA_LYNX) || \ 58 defined(CONFIG_ALPHA_LYNX) || \
59 defined(CONFIG_ALPHA_SHARK) || \ 59 defined(CONFIG_ALPHA_SHARK)
60 defined(CONFIG_ALPHA_EIGER)
61# define NR_IRQS 64 60# define NR_IRQS 64
62 61
63#elif defined(CONFIG_ALPHA_TITAN) 62#elif defined(CONFIG_ALPHA_TITAN)
64#define NR_IRQS 80 63#define NR_IRQS 80
65 64
66#elif defined(CONFIG_ALPHA_RAWHIDE) || \ 65#elif defined(CONFIG_ALPHA_RAWHIDE) || \
67 defined(CONFIG_ALPHA_TAKARA) 66 defined(CONFIG_ALPHA_TAKARA) || \
67 defined(CONFIG_ALPHA_EIGER)
68# define NR_IRQS 128 68# define NR_IRQS 128
69 69
70#elif defined(CONFIG_ALPHA_WILDFIRE) 70#elif defined(CONFIG_ALPHA_WILDFIRE)
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index d73dc473fbb9..188fc9256baf 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
78/* Macro for exception fixup code to access integer registers. */ 78/* Macro for exception fixup code to access integer registers. */
79#define dpf_reg(r) \ 79#define dpf_reg(r) \
80 (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ 80 (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
81 (r) <= 18 ? (r)+8 : (r)-10]) 81 (r) <= 18 ? (r)+10 : (r)-10])
82 82
83asmlinkage void 83asmlinkage void
84do_page_fault(unsigned long address, unsigned long mmcsr, 84do_page_fault(unsigned long address, unsigned long mmcsr,
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 376366a7db81..d750b302d5ab 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -191,7 +191,6 @@ config NR_CPUS
191 191
192config ARC_SMP_HALT_ON_RESET 192config ARC_SMP_HALT_ON_RESET
193 bool "Enable Halt-on-reset boot mode" 193 bool "Enable Halt-on-reset boot mode"
194 default y if ARC_UBOOT_SUPPORT
195 help 194 help
196 In SMP configuration cores can be configured as Halt-on-reset 195 In SMP configuration cores can be configured as Halt-on-reset
197 or they could all start at same time. For Halt-on-reset, non 196 or they could all start at same time. For Halt-on-reset, non
@@ -407,6 +406,14 @@ config ARC_HAS_ACCL_REGS
407 (also referred to as r58:r59). These can also be used by gcc as GPR so 406 (also referred to as r58:r59). These can also be used by gcc as GPR so
408 kernel needs to save/restore per process 407 kernel needs to save/restore per process
409 408
409config ARC_IRQ_NO_AUTOSAVE
410 bool "Disable hardware autosave regfile on interrupts"
411 default n
412 help
413 On HS cores, taken interrupt auto saves the regfile on stack.
414 This is programmable and can be optionally disabled in which case
415 software INTERRUPT_PROLOGUE/EPILGUE do the needed work
416
410endif # ISA_ARCV2 417endif # ISA_ARCV2
411 418
412endmenu # "ARC CPU Configuration" 419endmenu # "ARC CPU Configuration"
@@ -515,17 +522,6 @@ config ARC_DBG_TLB_PARANOIA
515 522
516endif 523endif
517 524
518config ARC_UBOOT_SUPPORT
519 bool "Support uboot arg Handling"
520 help
521 ARC Linux by default checks for uboot provided args as pointers to
522 external cmdline or DTB. This however breaks in absence of uboot,
523 when booting from Metaware debugger directly, as the registers are
524 not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
525 registers look like uboot args to kernel which then chokes.
526 So only enable the uboot arg checking/processing if users are sure
527 of uboot being in play.
528
529config ARC_BUILTIN_DTB_NAME 525config ARC_BUILTIN_DTB_NAME
530 string "Built in DTB" 526 string "Built in DTB"
531 help 527 help
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 6e84060e7c90..621f59407d76 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5
31# CONFIG_ARC_HAS_LLSC is not set 31# CONFIG_ARC_HAS_LLSC is not set
32CONFIG_ARC_KVADDR_SIZE=402 32CONFIG_ARC_KVADDR_SIZE=402
33CONFIG_ARC_EMUL_UNALIGNED=y 33CONFIG_ARC_EMUL_UNALIGNED=y
34CONFIG_ARC_UBOOT_SUPPORT=y
35CONFIG_PREEMPT=y 34CONFIG_PREEMPT=y
36CONFIG_NET=y 35CONFIG_NET=y
37CONFIG_UNIX=y 36CONFIG_UNIX=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index 1e59a2e9c602..e447ace6fa1c 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y
13CONFIG_ARC_PLAT_AXS10X=y 13CONFIG_ARC_PLAT_AXS10X=y
14CONFIG_AXS103=y 14CONFIG_AXS103=y
15CONFIG_ISA_ARCV2=y 15CONFIG_ISA_ARCV2=y
16CONFIG_ARC_UBOOT_SUPPORT=y
17CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38" 16CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
18CONFIG_PREEMPT=y 17CONFIG_PREEMPT=y
19CONFIG_NET=y 18CONFIG_NET=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index b5c3f6c54b03..c82cdb10aaf4 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -15,8 +15,6 @@ CONFIG_AXS103=y
15CONFIG_ISA_ARCV2=y 15CONFIG_ISA_ARCV2=y
16CONFIG_SMP=y 16CONFIG_SMP=y
17# CONFIG_ARC_TIMERS_64BIT is not set 17# CONFIG_ARC_TIMERS_64BIT is not set
18# CONFIG_ARC_SMP_HALT_ON_RESET is not set
19CONFIG_ARC_UBOOT_SUPPORT=y
20CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" 18CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
21CONFIG_PREEMPT=y 19CONFIG_PREEMPT=y
22CONFIG_NET=y 20CONFIG_NET=y
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index feed50ce89fa..caa270261521 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -3,23 +3,19 @@ generic-y += bugs.h
3generic-y += compat.h 3generic-y += compat.h
4generic-y += device.h 4generic-y += device.h
5generic-y += div64.h 5generic-y += div64.h
6generic-y += dma-mapping.h
7generic-y += emergency-restart.h 6generic-y += emergency-restart.h
8generic-y += extable.h 7generic-y += extable.h
9generic-y += fb.h
10generic-y += ftrace.h 8generic-y += ftrace.h
11generic-y += hardirq.h 9generic-y += hardirq.h
12generic-y += hw_irq.h 10generic-y += hw_irq.h
13generic-y += irq_regs.h 11generic-y += irq_regs.h
14generic-y += irq_work.h 12generic-y += irq_work.h
15generic-y += kmap_types.h
16generic-y += local.h 13generic-y += local.h
17generic-y += local64.h 14generic-y += local64.h
18generic-y += mcs_spinlock.h 15generic-y += mcs_spinlock.h
19generic-y += mm-arch-hooks.h 16generic-y += mm-arch-hooks.h
20generic-y += msi.h 17generic-y += msi.h
21generic-y += parport.h 18generic-y += parport.h
22generic-y += pci.h
23generic-y += percpu.h 19generic-y += percpu.h
24generic-y += preempt.h 20generic-y += preempt.h
25generic-y += topology.h 21generic-y += topology.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 49bfbd879caa..a27eafdc8260 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -151,6 +151,14 @@ struct bcr_isa_arcv2 {
151#endif 151#endif
152}; 152};
153 153
154struct bcr_uarch_build_arcv2 {
155#ifdef CONFIG_CPU_BIG_ENDIAN
156 unsigned int pad:8, prod:8, maj:8, min:8;
157#else
158 unsigned int min:8, maj:8, prod:8, pad:8;
159#endif
160};
161
154struct bcr_mpy { 162struct bcr_mpy {
155#ifdef CONFIG_CPU_BIG_ENDIAN 163#ifdef CONFIG_CPU_BIG_ENDIAN
156 unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; 164 unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
@@ -216,6 +224,14 @@ struct bcr_fp_arcv2 {
216#endif 224#endif
217}; 225};
218 226
227struct bcr_actionpoint {
228#ifdef CONFIG_CPU_BIG_ENDIAN
229 unsigned int pad:21, min:1, num:2, ver:8;
230#else
231 unsigned int ver:8, num:2, min:1, pad:21;
232#endif
233};
234
219#include <soc/arc/timers.h> 235#include <soc/arc/timers.h>
220 236
221struct bcr_bpu_arcompact { 237struct bcr_bpu_arcompact {
@@ -283,7 +299,7 @@ struct cpuinfo_arc_cache {
283}; 299};
284 300
285struct cpuinfo_arc_bpu { 301struct cpuinfo_arc_bpu {
286 unsigned int ver, full, num_cache, num_pred; 302 unsigned int ver, full, num_cache, num_pred, ret_stk;
287}; 303};
288 304
289struct cpuinfo_arc_ccm { 305struct cpuinfo_arc_ccm {
@@ -302,7 +318,7 @@ struct cpuinfo_arc {
302 struct { 318 struct {
303 unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, 319 unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
304 fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4, 320 fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
305 debug:1, ap:1, smart:1, rtt:1, pad3:4, 321 ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
306 timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; 322 timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
307 } extn; 323 } extn;
308 struct bcr_mpy extn_mpy; 324 struct bcr_mpy extn_mpy;
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index ee9246184033..202b74c339f0 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
340/* 340/*
341 * __ffs: Similar to ffs, but zero based (0-31) 341 * __ffs: Similar to ffs, but zero based (0-31)
342 */ 342 */
343static inline __attribute__ ((const)) int __ffs(unsigned long word) 343static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
344{ 344{
345 if (!word) 345 if (!word)
346 return word; 346 return word;
@@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
400/* 400/*
401 * __ffs: Similar to ffs, but zero based (0-31) 401 * __ffs: Similar to ffs, but zero based (0-31)
402 */ 402 */
403static inline __attribute__ ((const)) int __ffs(unsigned long x) 403static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
404{ 404{
405 int n; 405 unsigned long n;
406 406
407 asm volatile( 407 asm volatile(
408 " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */ 408 " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index f393b663413e..2ad77fb43639 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -52,6 +52,17 @@
52#define cache_line_size() SMP_CACHE_BYTES 52#define cache_line_size() SMP_CACHE_BYTES
53#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES 53#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
54 54
55/*
56 * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
57 * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit
58 * alignment for any atomic64_t embedded in buffer.
59 * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
60 * value of 4 (and not 8) in ARC ABI.
61 */
62#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
63#define ARCH_SLAB_MINALIGN 8
64#endif
65
55extern void arc_cache_init(void); 66extern void arc_cache_init(void);
56extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); 67extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
57extern void read_decode_cache_bcr(void); 68extern void read_decode_cache_bcr(void);
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 309f4e6721b3..225e7df2d8ed 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -17,6 +17,33 @@
17 ; 17 ;
18 ; Now manually save: r12, sp, fp, gp, r25 18 ; Now manually save: r12, sp, fp, gp, r25
19 19
20#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
21.ifnc \called_from, exception
22 st.as r9, [sp, -10] ; save r9 in it's final stack slot
23 sub sp, sp, 12 ; skip JLI, LDI, EI
24
25 PUSH lp_count
26 PUSHAX lp_start
27 PUSHAX lp_end
28 PUSH blink
29
30 PUSH r11
31 PUSH r10
32
33 sub sp, sp, 4 ; skip r9
34
35 PUSH r8
36 PUSH r7
37 PUSH r6
38 PUSH r5
39 PUSH r4
40 PUSH r3
41 PUSH r2
42 PUSH r1
43 PUSH r0
44.endif
45#endif
46
20#ifdef CONFIG_ARC_HAS_ACCL_REGS 47#ifdef CONFIG_ARC_HAS_ACCL_REGS
21 PUSH r59 48 PUSH r59
22 PUSH r58 49 PUSH r58
@@ -86,6 +113,33 @@
86 POP r59 113 POP r59
87#endif 114#endif
88 115
116#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
117.ifnc \called_from, exception
118 POP r0
119 POP r1
120 POP r2
121 POP r3
122 POP r4
123 POP r5
124 POP r6
125 POP r7
126 POP r8
127 POP r9
128 POP r10
129 POP r11
130
131 POP blink
132 POPAX lp_end
133 POPAX lp_start
134
135 POP r9
136 mov lp_count, r9
137
138 add sp, sp, 12 ; skip JLI, LDI, EI
139 ld.as r9, [sp, -10] ; reload r9 which got clobbered
140.endif
141#endif
142
89.endm 143.endm
90 144
91/*------------------------------------------------------------------------*/ 145/*------------------------------------------------------------------------*/
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 9185541035cc..6958545390f0 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
103 103
104 /* counts condition */ 104 /* counts condition */
105 [PERF_COUNT_HW_INSTRUCTIONS] = "iall", 105 [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
106 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */ 106 /* All jump instructions that are taken */
107 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
107 [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ 108 [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
108#ifdef CONFIG_ISA_ARCV2 109#ifdef CONFIG_ISA_ARCV2
109 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", 110 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index c9173c02081c..eabc3efa6c6d 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
207 */ 207 */
208 "=&r" (tmp), "+r" (to), "+r" (from) 208 "=&r" (tmp), "+r" (to), "+r" (from)
209 : 209 :
210 : "lp_count", "lp_start", "lp_end", "memory"); 210 : "lp_count", "memory");
211 211
212 return n; 212 return n;
213 } 213 }
@@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
433 */ 433 */
434 "=&r" (tmp), "+r" (to), "+r" (from) 434 "=&r" (tmp), "+r" (to), "+r" (from)
435 : 435 :
436 : "lp_count", "lp_start", "lp_end", "memory"); 436 : "lp_count", "memory");
437 437
438 return n; 438 return n;
439 } 439 }
@@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
653 " .previous \n" 653 " .previous \n"
654 : "+r"(d_char), "+r"(res) 654 : "+r"(d_char), "+r"(res)
655 : "i"(0) 655 : "i"(0)
656 : "lp_count", "lp_start", "lp_end", "memory"); 656 : "lp_count", "memory");
657 657
658 return res; 658 return res;
659} 659}
@@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
686 " .previous \n" 686 " .previous \n"
687 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) 687 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
688 : "g"(-EFAULT), "r"(count) 688 : "g"(-EFAULT), "r"(count)
689 : "lp_count", "lp_start", "lp_end", "memory"); 689 : "lp_count", "memory");
690 690
691 return res; 691 return res;
692} 692}
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cc558a25b8fa..562089d62d9d 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -209,7 +209,9 @@ restore_regs:
209;####### Return from Intr ####### 209;####### Return from Intr #######
210 210
211debug_marker_l1: 211debug_marker_l1:
212 bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot 212 ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
213 btst r0, STATUS_DE_BIT ; Z flag set if bit clear
214 bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set
213 215
214.Lisr_ret_fast_path: 216.Lisr_ret_fast_path:
215 ; Handle special case #1: (Entry via Exception, Return via IRQ) 217 ; Handle special case #1: (Entry via Exception, Return via IRQ)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25a15cc..30e090625916 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
17#include <asm/entry.h> 17#include <asm/entry.h>
18#include <asm/arcregs.h> 18#include <asm/arcregs.h>
19#include <asm/cache.h> 19#include <asm/cache.h>
20#include <asm/irqflags.h>
20 21
21.macro CPU_EARLY_SETUP 22.macro CPU_EARLY_SETUP
22 23
@@ -47,6 +48,15 @@
47 sr r5, [ARC_REG_DC_CTRL] 48 sr r5, [ARC_REG_DC_CTRL]
48 49
491: 501:
51
52#ifdef CONFIG_ISA_ARCV2
53 ; Unaligned access is disabled at reset, so re-enable early as
54 ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
55 ; by default
56 lr r5, [status32]
57 bset r5, r5, STATUS_AD_BIT
58 kflag r5
59#endif
50.endm 60.endm
51 61
52 .section .init.text, "ax",@progbits 62 .section .init.text, "ax",@progbits
@@ -90,15 +100,13 @@ ENTRY(stext)
90 st.ab 0, [r5, 4] 100 st.ab 0, [r5, 4]
911: 1011:
92 102
93#ifdef CONFIG_ARC_UBOOT_SUPPORT
94 ; Uboot - kernel ABI 103 ; Uboot - kernel ABI
95 ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 104 ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
96 ; r1 = magic number (board identity, unused as of now 105 ; r1 = magic number (always zero as of now)
97 ; r2 = pointer to uboot provided cmdline or external DTB in mem 106 ; r2 = pointer to uboot provided cmdline or external DTB in mem
98 ; These are handled later in setup_arch() 107 ; These are handled later in handle_uboot_args()
99 st r0, [@uboot_tag] 108 st r0, [@uboot_tag]
100 st r2, [@uboot_arg] 109 st r2, [@uboot_arg]
101#endif
102 110
103 ; setup "current" tsk and optionally cache it in dedicated r25 111 ; setup "current" tsk and optionally cache it in dedicated r25
104 mov r9, @init_task 112 mov r9, @init_task
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 067ea362fb3e..cf18b3e5a934 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -49,11 +49,13 @@ void arc_init_IRQ(void)
49 49
50 *(unsigned int *)&ictrl = 0; 50 *(unsigned int *)&ictrl = 0;
51 51
52#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
52 ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ 53 ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */
53 ictrl.save_blink = 1; 54 ictrl.save_blink = 1;
54 ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ 55 ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */
55 ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ 56 ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */
56 ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ 57 ictrl.save_idx_regs = 1; /* JLI, LDI, EI */
58#endif
57 59
58 WRITE_AUX(AUX_IRQ_CTRL, ictrl); 60 WRITE_AUX(AUX_IRQ_CTRL, ictrl);
59 61
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8aec462d90fb..861a8aea51f9 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -1,15 +1,10 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * Linux performance counter support for ARC700 series 2//
3 * 3// Linux performance counter support for ARC CPUs.
4 * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com) 4// This code is inspired by the perf support of various other architectures.
5 * 5//
6 * This code is inspired by the perf support of various other architectures. 6// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
7 * 7
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13#include <linux/errno.h> 8#include <linux/errno.h>
14#include <linux/interrupt.h> 9#include <linux/interrupt.h>
15#include <linux/module.h> 10#include <linux/module.h>
@@ -19,12 +14,31 @@
19#include <asm/arcregs.h> 14#include <asm/arcregs.h>
20#include <asm/stacktrace.h> 15#include <asm/stacktrace.h>
21 16
17/* HW holds 8 symbols + one for null terminator */
18#define ARCPMU_EVENT_NAME_LEN 9
19
20enum arc_pmu_attr_groups {
21 ARCPMU_ATTR_GR_EVENTS,
22 ARCPMU_ATTR_GR_FORMATS,
23 ARCPMU_NR_ATTR_GR
24};
25
26struct arc_pmu_raw_event_entry {
27 char name[ARCPMU_EVENT_NAME_LEN];
28};
29
22struct arc_pmu { 30struct arc_pmu {
23 struct pmu pmu; 31 struct pmu pmu;
24 unsigned int irq; 32 unsigned int irq;
25 int n_counters; 33 int n_counters;
34 int n_events;
26 u64 max_period; 35 u64 max_period;
27 int ev_hw_idx[PERF_COUNT_ARC_HW_MAX]; 36 int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
37
38 struct arc_pmu_raw_event_entry *raw_entry;
39 struct attribute **attrs;
40 struct perf_pmu_events_attr *attr;
41 const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
28}; 42};
29 43
30struct arc_pmu_cpu { 44struct arc_pmu_cpu {
@@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data)
49{ 63{
50 struct arc_callchain_trace *ctrl = data; 64 struct arc_callchain_trace *ctrl = data;
51 struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff; 65 struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
66
52 perf_callchain_store(entry, addr); 67 perf_callchain_store(entry, addr);
53 68
54 if (ctrl->depth++ < 3) 69 if (ctrl->depth++ < 3)
@@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data)
57 return -1; 72 return -1;
58} 73}
59 74
60void 75void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
61perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 76 struct pt_regs *regs)
62{ 77{
63 struct arc_callchain_trace ctrl = { 78 struct arc_callchain_trace ctrl = {
64 .depth = 0, 79 .depth = 0,
@@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
68 arc_unwind_core(NULL, regs, callchain_trace, &ctrl); 83 arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
69} 84}
70 85
71void 86void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
72perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 87 struct pt_regs *regs)
73{ 88{
74 /* 89 /*
75 * User stack can't be unwound trivially with kernel dwarf unwinder 90 * User stack can't be unwound trivially with kernel dwarf unwinder
@@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu;
82static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu); 97static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
83 98
84/* read counter #idx; note that counter# != event# on ARC! */ 99/* read counter #idx; note that counter# != event# on ARC! */
85static uint64_t arc_pmu_read_counter(int idx) 100static u64 arc_pmu_read_counter(int idx)
86{ 101{
87 uint32_t tmp; 102 u32 tmp;
88 uint64_t result; 103 u64 result;
89 104
90 /* 105 /*
91 * ARC supports making 'snapshots' of the counters, so we don't 106 * ARC supports making 'snapshots' of the counters, so we don't
@@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx)
94 write_aux_reg(ARC_REG_PCT_INDEX, idx); 109 write_aux_reg(ARC_REG_PCT_INDEX, idx);
95 tmp = read_aux_reg(ARC_REG_PCT_CONTROL); 110 tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
96 write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN); 111 write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
97 result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; 112 result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
98 result |= read_aux_reg(ARC_REG_PCT_SNAPL); 113 result |= read_aux_reg(ARC_REG_PCT_SNAPL);
99 114
100 return result; 115 return result;
@@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx)
103static void arc_perf_event_update(struct perf_event *event, 118static void arc_perf_event_update(struct perf_event *event,
104 struct hw_perf_event *hwc, int idx) 119 struct hw_perf_event *hwc, int idx)
105{ 120{
106 uint64_t prev_raw_count = local64_read(&hwc->prev_count); 121 u64 prev_raw_count = local64_read(&hwc->prev_count);
107 uint64_t new_raw_count = arc_pmu_read_counter(idx); 122 u64 new_raw_count = arc_pmu_read_counter(idx);
108 int64_t delta = new_raw_count - prev_raw_count; 123 s64 delta = new_raw_count - prev_raw_count;
109 124
110 /* 125 /*
111 * We aren't afraid of hwc->prev_count changing beneath our feet 126 * We aren't afraid of hwc->prev_count changing beneath our feet
@@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event)
155 int ret; 170 int ret;
156 171
157 if (!is_sampling_event(event)) { 172 if (!is_sampling_event(event)) {
158 hwc->sample_period = arc_pmu->max_period; 173 hwc->sample_period = arc_pmu->max_period;
159 hwc->last_period = hwc->sample_period; 174 hwc->last_period = hwc->sample_period;
160 local64_set(&hwc->period_left, hwc->sample_period); 175 local64_set(&hwc->period_left, hwc->sample_period);
161 } 176 }
@@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event)
192 pr_debug("init cache event with h/w %08x \'%s\'\n", 207 pr_debug("init cache event with h/w %08x \'%s\'\n",
193 (int)hwc->config, arc_pmu_ev_hw_map[ret]); 208 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
194 return 0; 209 return 0;
210
211 case PERF_TYPE_RAW:
212 if (event->attr.config >= arc_pmu->n_events)
213 return -ENOENT;
214
215 hwc->config |= event->attr.config;
216 pr_debug("init raw event with idx %lld \'%s\'\n",
217 event->attr.config,
218 arc_pmu->raw_entry[event->attr.config].name);
219
220 return 0;
221
195 default: 222 default:
196 return -ENOENT; 223 return -ENOENT;
197 } 224 }
@@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event)
200/* starts all counters */ 227/* starts all counters */
201static void arc_pmu_enable(struct pmu *pmu) 228static void arc_pmu_enable(struct pmu *pmu)
202{ 229{
203 uint32_t tmp; 230 u32 tmp;
204 tmp = read_aux_reg(ARC_REG_PCT_CONTROL); 231 tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
205 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1); 232 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
206} 233}
@@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu)
208/* stops all counters */ 235/* stops all counters */
209static void arc_pmu_disable(struct pmu *pmu) 236static void arc_pmu_disable(struct pmu *pmu)
210{ 237{
211 uint32_t tmp; 238 u32 tmp;
212 tmp = read_aux_reg(ARC_REG_PCT_CONTROL); 239 tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
213 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0); 240 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
214} 241}
@@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event)
228 local64_set(&hwc->period_left, left); 255 local64_set(&hwc->period_left, left);
229 hwc->last_period = period; 256 hwc->last_period = period;
230 overflow = 1; 257 overflow = 1;
231 } else if (unlikely(left <= 0)) { 258 } else if (unlikely(left <= 0)) {
232 /* left underflowed by less than period. */ 259 /* left underflowed by less than period. */
233 left += period; 260 left += period;
234 local64_set(&hwc->period_left, left); 261 local64_set(&hwc->period_left, left);
@@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event)
246 write_aux_reg(ARC_REG_PCT_INDEX, idx); 273 write_aux_reg(ARC_REG_PCT_INDEX, idx);
247 274
248 /* Write value */ 275 /* Write value */
249 write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value); 276 write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
250 write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32)); 277 write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
251 278
252 perf_event_update_userpage(event); 279 perf_event_update_userpage(event);
253 280
@@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags)
277 /* Enable interrupt for this counter */ 304 /* Enable interrupt for this counter */
278 if (is_sampling_event(event)) 305 if (is_sampling_event(event))
279 write_aux_reg(ARC_REG_PCT_INT_CTRL, 306 write_aux_reg(ARC_REG_PCT_INT_CTRL,
280 read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); 307 read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
281 308
282 /* enable ARC pmu here */ 309 /* enable ARC pmu here */
283 write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */ 310 write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */
@@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
295 * Reset interrupt flag by writing of 1. This is required 322 * Reset interrupt flag by writing of 1. This is required
296 * to make sure pending interrupt was not left. 323 * to make sure pending interrupt was not left.
297 */ 324 */
298 write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); 325 write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
299 write_aux_reg(ARC_REG_PCT_INT_CTRL, 326 write_aux_reg(ARC_REG_PCT_INT_CTRL,
300 read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx)); 327 read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
301 } 328 }
302 329
303 if (!(event->hw.state & PERF_HES_STOPPED)) { 330 if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags)
349 376
350 if (is_sampling_event(event)) { 377 if (is_sampling_event(event)) {
351 /* Mimic full counter overflow as other arches do */ 378 /* Mimic full counter overflow as other arches do */
352 write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period); 379 write_aux_reg(ARC_REG_PCT_INT_CNTL,
380 lower_32_bits(arc_pmu->max_period));
353 write_aux_reg(ARC_REG_PCT_INT_CNTH, 381 write_aux_reg(ARC_REG_PCT_INT_CNTH,
354 (arc_pmu->max_period >> 32)); 382 upper_32_bits(arc_pmu->max_period));
355 } 383 }
356 384
357 write_aux_reg(ARC_REG_PCT_CONFIG, 0); 385 write_aux_reg(ARC_REG_PCT_CONFIG, 0);
@@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
392 idx = __ffs(active_ints); 420 idx = __ffs(active_ints);
393 421
394 /* Reset interrupt flag by writing of 1 */ 422 /* Reset interrupt flag by writing of 1 */
395 write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); 423 write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
396 424
397 /* 425 /*
398 * On reset of "interrupt active" bit corresponding 426 * On reset of "interrupt active" bit corresponding
@@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
400 * Now we need to re-enable interrupt for the counter. 428 * Now we need to re-enable interrupt for the counter.
401 */ 429 */
402 write_aux_reg(ARC_REG_PCT_INT_CTRL, 430 write_aux_reg(ARC_REG_PCT_INT_CTRL,
403 read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); 431 read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
404 432
405 event = pmu_cpu->act_counter[idx]; 433 event = pmu_cpu->act_counter[idx];
406 hwc = &event->hw; 434 hwc = &event->hw;
@@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
414 arc_pmu_stop(event, 0); 442 arc_pmu_stop(event, 0);
415 } 443 }
416 444
417 active_ints &= ~(1U << idx); 445 active_ints &= ~BIT(idx);
418 } while (active_ints); 446 } while (active_ints);
419 447
420done: 448done:
@@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data)
441 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); 469 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
442} 470}
443 471
472/* Event field occupies the bottom 15 bits of our config field */
473PMU_FORMAT_ATTR(event, "config:0-14");
474static struct attribute *arc_pmu_format_attrs[] = {
475 &format_attr_event.attr,
476 NULL,
477};
478
479static struct attribute_group arc_pmu_format_attr_gr = {
480 .name = "format",
481 .attrs = arc_pmu_format_attrs,
482};
483
484static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
485 struct device_attribute *attr,
486 char *page)
487{
488 struct perf_pmu_events_attr *pmu_attr;
489
490 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
491 return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
492}
493
494/*
495 * We don't add attrs here as we don't have pre-defined list of perf events.
496 * We will generate and add attrs dynamically in probe() after we read HW
497 * configuration.
498 */
499static struct attribute_group arc_pmu_events_attr_gr = {
500 .name = "events",
501};
502
503static void arc_pmu_add_raw_event_attr(int j, char *str)
504{
505 memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
506 arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
507 arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
508 arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
509 arc_pmu->attr[j].id = j;
510 arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
511}
512
513static int arc_pmu_raw_alloc(struct device *dev)
514{
515 arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
516 sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
517 if (!arc_pmu->attr)
518 return -ENOMEM;
519
520 arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
521 sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
522 if (!arc_pmu->attrs)
523 return -ENOMEM;
524
525 arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
526 sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
527 if (!arc_pmu->raw_entry)
528 return -ENOMEM;
529
530 return 0;
531}
532
533static inline bool event_in_hw_event_map(int i, char *name)
534{
535 if (!arc_pmu_ev_hw_map[i])
536 return false;
537
538 if (!strlen(arc_pmu_ev_hw_map[i]))
539 return false;
540
541 if (strcmp(arc_pmu_ev_hw_map[i], name))
542 return false;
543
544 return true;
545}
546
547static void arc_pmu_map_hw_event(int j, char *str)
548{
549 int i;
550
551 /* See if HW condition has been mapped to a perf event_id */
552 for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
553 if (event_in_hw_event_map(i, str)) {
554 pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
555 i, str, j);
556 arc_pmu->ev_hw_idx[i] = j;
557 }
558 }
559}
560
444static int arc_pmu_device_probe(struct platform_device *pdev) 561static int arc_pmu_device_probe(struct platform_device *pdev)
445{ 562{
446 struct arc_reg_pct_build pct_bcr; 563 struct arc_reg_pct_build pct_bcr;
447 struct arc_reg_cc_build cc_bcr; 564 struct arc_reg_cc_build cc_bcr;
448 int i, j, has_interrupts; 565 int i, has_interrupts;
449 int counter_size; /* in bits */ 566 int counter_size; /* in bits */
450 567
451 union cc_name { 568 union cc_name {
452 struct { 569 struct {
453 uint32_t word0, word1; 570 u32 word0, word1;
454 char sentinel; 571 char sentinel;
455 } indiv; 572 } indiv;
456 char str[9]; 573 char str[ARCPMU_EVENT_NAME_LEN];
457 } cc_name; 574 } cc_name;
458 575
459 576
@@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
463 return -ENODEV; 580 return -ENODEV;
464 } 581 }
465 BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32); 582 BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
466 BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS); 583 if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
584 return -EINVAL;
467 585
468 READ_BCR(ARC_REG_CC_BUILD, cc_bcr); 586 READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
469 BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */ 587 if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
588 return -EINVAL;
470 589
471 arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL); 590 arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
472 if (!arc_pmu) 591 if (!arc_pmu)
473 return -ENOMEM; 592 return -ENOMEM;
474 593
594 arc_pmu->n_events = cc_bcr.c;
595
596 if (arc_pmu_raw_alloc(&pdev->dev))
597 return -ENOMEM;
598
475 has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0; 599 has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
476 600
477 arc_pmu->n_counters = pct_bcr.c; 601 arc_pmu->n_counters = pct_bcr.c;
@@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
481 605
482 pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n", 606 pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
483 arc_pmu->n_counters, counter_size, cc_bcr.c, 607 arc_pmu->n_counters, counter_size, cc_bcr.c,
484 has_interrupts ? ", [overflow IRQ support]":""); 608 has_interrupts ? ", [overflow IRQ support]" : "");
485 609
486 cc_name.str[8] = 0; 610 cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
487 for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++) 611 for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
488 arc_pmu->ev_hw_idx[i] = -1; 612 arc_pmu->ev_hw_idx[i] = -1;
489 613
490 /* loop thru all available h/w condition indexes */ 614 /* loop thru all available h/w condition indexes */
491 for (j = 0; j < cc_bcr.c; j++) { 615 for (i = 0; i < cc_bcr.c; i++) {
492 write_aux_reg(ARC_REG_CC_INDEX, j); 616 write_aux_reg(ARC_REG_CC_INDEX, i);
493 cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); 617 cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
494 cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); 618 cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
495 619
496 /* See if it has been mapped to a perf event_id */ 620 arc_pmu_map_hw_event(i, cc_name.str);
497 for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { 621 arc_pmu_add_raw_event_attr(i, cc_name.str);
498 if (arc_pmu_ev_hw_map[i] &&
499 !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
500 strlen(arc_pmu_ev_hw_map[i])) {
501 pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
502 i, cc_name.str, j);
503 arc_pmu->ev_hw_idx[i] = j;
504 }
505 }
506 } 622 }
507 623
624 arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
625 arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
626 arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
627
508 arc_pmu->pmu = (struct pmu) { 628 arc_pmu->pmu = (struct pmu) {
509 .pmu_enable = arc_pmu_enable, 629 .pmu_enable = arc_pmu_enable,
510 .pmu_disable = arc_pmu_disable, 630 .pmu_disable = arc_pmu_disable,
@@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
514 .start = arc_pmu_start, 634 .start = arc_pmu_start,
515 .stop = arc_pmu_stop, 635 .stop = arc_pmu_stop,
516 .read = arc_pmu_read, 636 .read = arc_pmu_read,
637 .attr_groups = arc_pmu->attr_groups,
517 }; 638 };
518 639
519 if (has_interrupts) { 640 if (has_interrupts) {
@@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
535 } else 656 } else
536 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; 657 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
537 658
538 return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW); 659 /*
660 * perf parser doesn't really like '-' symbol in events name, so let's
661 * use '_' in arc pct name as it goes to kernel PMU event prefix.
662 */
663 return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
539} 664}
540 665
541#ifdef CONFIG_OF
542static const struct of_device_id arc_pmu_match[] = { 666static const struct of_device_id arc_pmu_match[] = {
543 { .compatible = "snps,arc700-pct" }, 667 { .compatible = "snps,arc700-pct" },
544 { .compatible = "snps,archs-pct" }, 668 { .compatible = "snps,archs-pct" },
545 {}, 669 {},
546}; 670};
547MODULE_DEVICE_TABLE(of, arc_pmu_match); 671MODULE_DEVICE_TABLE(of, arc_pmu_match);
548#endif
549 672
550static struct platform_driver arc_pmu_driver = { 673static struct platform_driver arc_pmu_driver = {
551 .driver = { 674 .driver = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 2e018b8c2e19..7b2340996cf8 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void)
123 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 123 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
124 const struct id_to_str *tbl; 124 const struct id_to_str *tbl;
125 struct bcr_isa_arcv2 isa; 125 struct bcr_isa_arcv2 isa;
126 struct bcr_actionpoint ap;
126 127
127 FIX_PTR(cpu); 128 FIX_PTR(cpu);
128 129
@@ -195,20 +196,40 @@ static void read_arc_build_cfg_regs(void)
195 cpu->bpu.full = bpu.ft; 196 cpu->bpu.full = bpu.ft;
196 cpu->bpu.num_cache = 256 << bpu.bce; 197 cpu->bpu.num_cache = 256 << bpu.bce;
197 cpu->bpu.num_pred = 2048 << bpu.pte; 198 cpu->bpu.num_pred = 2048 << bpu.pte;
199 cpu->bpu.ret_stk = 4 << bpu.rse;
198 200
199 if (cpu->core.family >= 0x54) { 201 if (cpu->core.family >= 0x54) {
200 unsigned int exec_ctrl;
201 202
202 READ_BCR(AUX_EXEC_CTRL, exec_ctrl); 203 struct bcr_uarch_build_arcv2 uarch;
203 cpu->extn.dual_enb = !(exec_ctrl & 1);
204 204
205 /* dual issue always present for this core */ 205 /*
206 cpu->extn.dual = 1; 206 * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
207 * dual issue only (HS4x). But next uarch rev (1:0)
208 * allows it be configured for single issue (HS3x)
209 * Ensure we fiddle with dual issue only on HS4x
210 */
211 READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
212
213 if (uarch.prod == 4) {
214 unsigned int exec_ctrl;
215
216 /* dual issue hardware always present */
217 cpu->extn.dual = 1;
218
219 READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
220
221 /* dual issue hardware enabled ? */
222 cpu->extn.dual_enb = !(exec_ctrl & 1);
223
224 }
207 } 225 }
208 } 226 }
209 227
210 READ_BCR(ARC_REG_AP_BCR, bcr); 228 READ_BCR(ARC_REG_AP_BCR, ap);
211 cpu->extn.ap = bcr.ver ? 1 : 0; 229 if (ap.ver) {
230 cpu->extn.ap_num = 2 << ap.num;
231 cpu->extn.ap_full = !ap.min;
232 }
212 233
213 READ_BCR(ARC_REG_SMART_BCR, bcr); 234 READ_BCR(ARC_REG_SMART_BCR, bcr);
214 cpu->extn.smart = bcr.ver ? 1 : 0; 235 cpu->extn.smart = bcr.ver ? 1 : 0;
@@ -216,8 +237,6 @@ static void read_arc_build_cfg_regs(void)
216 READ_BCR(ARC_REG_RTT_BCR, bcr); 237 READ_BCR(ARC_REG_RTT_BCR, bcr);
217 cpu->extn.rtt = bcr.ver ? 1 : 0; 238 cpu->extn.rtt = bcr.ver ? 1 : 0;
218 239
219 cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
220
221 READ_BCR(ARC_REG_ISA_CFG_BCR, isa); 240 READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
222 241
223 /* some hacks for lack of feature BCR info in old ARC700 cores */ 242 /* some hacks for lack of feature BCR info in old ARC700 cores */
@@ -299,10 +318,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
299 318
300 if (cpu->bpu.ver) 319 if (cpu->bpu.ver)
301 n += scnprintf(buf + n, len - n, 320 n += scnprintf(buf + n, len - n,
302 "BPU\t\t: %s%s match, cache:%d, Predict Table:%d", 321 "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
303 IS_AVAIL1(cpu->bpu.full, "full"), 322 IS_AVAIL1(cpu->bpu.full, "full"),
304 IS_AVAIL1(!cpu->bpu.full, "partial"), 323 IS_AVAIL1(!cpu->bpu.full, "partial"),
305 cpu->bpu.num_cache, cpu->bpu.num_pred); 324 cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
306 325
307 if (is_isa_arcv2()) { 326 if (is_isa_arcv2()) {
308 struct bcr_lpb lpb; 327 struct bcr_lpb lpb;
@@ -336,11 +355,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
336 IS_AVAIL1(cpu->extn.fpu_sp, "SP "), 355 IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
337 IS_AVAIL1(cpu->extn.fpu_dp, "DP ")); 356 IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
338 357
339 if (cpu->extn.debug) 358 if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
340 n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n", 359 n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
341 IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
342 IS_AVAIL1(cpu->extn.smart, "smaRT "), 360 IS_AVAIL1(cpu->extn.smart, "smaRT "),
343 IS_AVAIL1(cpu->extn.rtt, "RTT ")); 361 IS_AVAIL1(cpu->extn.rtt, "RTT "));
362 if (cpu->extn.ap_num) {
363 n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
364 cpu->extn.ap_num,
365 cpu->extn.ap_full ? "full":"min");
366 }
367 n += scnprintf(buf + n, len - n, "\n");
368 }
344 369
345 if (cpu->dccm.sz || cpu->iccm.sz) 370 if (cpu->dccm.sz || cpu->iccm.sz)
346 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n", 371 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
@@ -453,43 +478,78 @@ void setup_processor(void)
453 arc_chk_core_config(); 478 arc_chk_core_config();
454} 479}
455 480
456static inline int is_kernel(unsigned long addr) 481static inline bool uboot_arg_invalid(unsigned long addr)
457{ 482{
458 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) 483 /*
459 return 1; 484 * Check that it is a untranslated address (although MMU is not enabled
460 return 0; 485 * yet, it being a high address ensures this is not by fluke)
486 */
487 if (addr < PAGE_OFFSET)
488 return true;
489
490 /* Check that address doesn't clobber resident kernel image */
491 return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
461} 492}
462 493
463void __init setup_arch(char **cmdline_p) 494#define IGNORE_ARGS "Ignore U-boot args: "
495
496/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
497#define UBOOT_TAG_NONE 0
498#define UBOOT_TAG_CMDLINE 1
499#define UBOOT_TAG_DTB 2
500
501void __init handle_uboot_args(void)
464{ 502{
465#ifdef CONFIG_ARC_UBOOT_SUPPORT 503 bool use_embedded_dtb = true;
466 /* make sure that uboot passed pointer to cmdline/dtb is valid */ 504 bool append_cmdline = false;
467 if (uboot_tag && is_kernel((unsigned long)uboot_arg)) 505
468 panic("Invalid uboot arg\n"); 506 /* check that we know this tag */
469 507 if (uboot_tag != UBOOT_TAG_NONE &&
470 /* See if u-boot passed an external Device Tree blob */ 508 uboot_tag != UBOOT_TAG_CMDLINE &&
471 machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */ 509 uboot_tag != UBOOT_TAG_DTB) {
472 if (!machine_desc) 510 pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
473#endif 511 goto ignore_uboot_args;
474 { 512 }
475 /* No, so try the embedded one */ 513
514 if (uboot_tag != UBOOT_TAG_NONE &&
515 uboot_arg_invalid((unsigned long)uboot_arg)) {
516 pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
517 goto ignore_uboot_args;
518 }
519
520 /* see if U-boot passed an external Device Tree blob */
521 if (uboot_tag == UBOOT_TAG_DTB) {
522 machine_desc = setup_machine_fdt((void *)uboot_arg);
523
524 /* external Device Tree blob is invalid - use embedded one */
525 use_embedded_dtb = !machine_desc;
526 }
527
528 if (uboot_tag == UBOOT_TAG_CMDLINE)
529 append_cmdline = true;
530
531ignore_uboot_args:
532
533 if (use_embedded_dtb) {
476 machine_desc = setup_machine_fdt(__dtb_start); 534 machine_desc = setup_machine_fdt(__dtb_start);
477 if (!machine_desc) 535 if (!machine_desc)
478 panic("Embedded DT invalid\n"); 536 panic("Embedded DT invalid\n");
537 }
479 538
480 /* 539 /*
481 * If we are here, it is established that @uboot_arg didn't 540 * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
482 * point to DT blob. Instead if u-boot says it is cmdline, 541 * append processing can only happen after.
483 * append to embedded DT cmdline. 542 */
484 * setup_machine_fdt() would have populated @boot_command_line 543 if (append_cmdline) {
485 */ 544 /* Ensure a whitespace between the 2 cmdlines */
486 if (uboot_tag == 1) { 545 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
487 /* Ensure a whitespace between the 2 cmdlines */ 546 strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
488 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
489 strlcat(boot_command_line, uboot_arg,
490 COMMAND_LINE_SIZE);
491 }
492 } 547 }
548}
549
550void __init setup_arch(char **cmdline_p)
551{
552 handle_uboot_args();
493 553
494 /* Save unparsed command line copy for /proc/cmdline */ 554 /* Save unparsed command line copy for /proc/cmdline */
495 *cmdline_p = boot_command_line; 555 *cmdline_p = boot_command_line;
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index e8d9fb452346..215f515442e0 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
18#include <asm/arcregs.h> 18#include <asm/arcregs.h>
19#include <asm/irqflags.h> 19#include <asm/irqflags.h>
20 20
21#define ARC_PATH_MAX 256
22
21/* 23/*
22 * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) 24 * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
23 * -Prints 3 regs per line and a CR. 25 * -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
58 print_reg_file(&(cregs->r13), 13); 60 print_reg_file(&(cregs->r13), 13);
59} 61}
60 62
61static void print_task_path_n_nm(struct task_struct *tsk, char *buf) 63static void print_task_path_n_nm(struct task_struct *tsk)
62{ 64{
63 char *path_nm = NULL; 65 char *path_nm = NULL;
64 struct mm_struct *mm; 66 struct mm_struct *mm;
65 struct file *exe_file; 67 struct file *exe_file;
68 char buf[ARC_PATH_MAX];
66 69
67 mm = get_task_mm(tsk); 70 mm = get_task_mm(tsk);
68 if (!mm) 71 if (!mm)
@@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
72 mmput(mm); 75 mmput(mm);
73 76
74 if (exe_file) { 77 if (exe_file) {
75 path_nm = file_path(exe_file, buf, 255); 78 path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
76 fput(exe_file); 79 fput(exe_file);
77 } 80 }
78 81
@@ -80,10 +83,9 @@ done:
80 pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?"); 83 pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
81} 84}
82 85
83static void show_faulting_vma(unsigned long address, char *buf) 86static void show_faulting_vma(unsigned long address)
84{ 87{
85 struct vm_area_struct *vma; 88 struct vm_area_struct *vma;
86 char *nm = buf;
87 struct mm_struct *active_mm = current->active_mm; 89 struct mm_struct *active_mm = current->active_mm;
88 90
89 /* can't use print_vma_addr() yet as it doesn't check for 91 /* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
96 * if the container VMA is not found 98 * if the container VMA is not found
97 */ 99 */
98 if (vma && (vma->vm_start <= address)) { 100 if (vma && (vma->vm_start <= address)) {
101 char buf[ARC_PATH_MAX];
102 char *nm = "?";
103
99 if (vma->vm_file) { 104 if (vma->vm_file) {
100 nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); 105 nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
101 if (IS_ERR(nm)) 106 if (IS_ERR(nm))
102 nm = "?"; 107 nm = "?";
103 } 108 }
@@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs)
173{ 178{
174 struct task_struct *tsk = current; 179 struct task_struct *tsk = current;
175 struct callee_regs *cregs; 180 struct callee_regs *cregs;
176 char *buf;
177 181
178 buf = (char *)__get_free_page(GFP_KERNEL); 182 /*
179 if (!buf) 183 * generic code calls us with preemption disabled, but some calls
180 return; 184 * here could sleep, so re-enable to avoid lockdep splat
185 */
186 preempt_enable();
181 187
182 print_task_path_n_nm(tsk, buf); 188 print_task_path_n_nm(tsk);
183 show_regs_print_info(KERN_INFO); 189 show_regs_print_info(KERN_INFO);
184 190
185 show_ecr_verbose(regs); 191 show_ecr_verbose(regs);
@@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs)
189 (void *)regs->blink, (void *)regs->ret); 195 (void *)regs->blink, (void *)regs->ret);
190 196
191 if (user_mode(regs)) 197 if (user_mode(regs))
192 show_faulting_vma(regs->ret, buf); /* faulting code, not data */ 198 show_faulting_vma(regs->ret); /* faulting code, not data */
193 199
194 pr_info("[STAT32]: 0x%08lx", regs->status32); 200 pr_info("[STAT32]: 0x%08lx", regs->status32);
195 201
@@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs)
222 if (cregs) 228 if (cregs)
223 show_callee_regs(cregs); 229 show_callee_regs(cregs);
224 230
225 free_page((unsigned long)buf); 231 preempt_disable();
226} 232}
227 233
228void show_kernel_fault_diag(const char *str, struct pt_regs *regs, 234void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index d61044dd8b58..ea14b0bf3116 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -25,15 +25,11 @@
25#endif 25#endif
26 26
27#ifdef CONFIG_ARC_HAS_LL64 27#ifdef CONFIG_ARC_HAS_LL64
28# define PREFETCH_READ(RX) prefetch [RX, 56]
29# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
30# define LOADX(DST,RX) ldd.ab DST, [RX, 8] 28# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
31# define STOREX(SRC,RX) std.ab SRC, [RX, 8] 29# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
32# define ZOLSHFT 5 30# define ZOLSHFT 5
33# define ZOLAND 0x1F 31# define ZOLAND 0x1F
34#else 32#else
35# define PREFETCH_READ(RX) prefetch [RX, 28]
36# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
37# define LOADX(DST,RX) ld.ab DST, [RX, 4] 33# define LOADX(DST,RX) ld.ab DST, [RX, 4]
38# define STOREX(SRC,RX) st.ab SRC, [RX, 4] 34# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
39# define ZOLSHFT 4 35# define ZOLSHFT 4
@@ -41,8 +37,6 @@
41#endif 37#endif
42 38
43ENTRY_CFI(memcpy) 39ENTRY_CFI(memcpy)
44 prefetch [r1] ; Prefetch the read location
45 prefetchw [r0] ; Prefetch the write location
46 mov.f 0, r2 40 mov.f 0, r2
47;;; if size is zero 41;;; if size is zero
48 jz.d [blink] 42 jz.d [blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
72 lpnz @.Lcopy32_64bytes 66 lpnz @.Lcopy32_64bytes
73 ;; LOOP START 67 ;; LOOP START
74 LOADX (r6, r1) 68 LOADX (r6, r1)
75 PREFETCH_READ (r1)
76 PREFETCH_WRITE (r3)
77 LOADX (r8, r1) 69 LOADX (r8, r1)
78 LOADX (r10, r1) 70 LOADX (r10, r1)
79 LOADX (r4, r1) 71 LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
117 lpnz @.Lcopy8bytes_1 109 lpnz @.Lcopy8bytes_1
118 ;; LOOP START 110 ;; LOOP START
119 ld.ab r6, [r1, 4] 111 ld.ab r6, [r1, 4]
120 prefetch [r1, 28] ;Prefetch the next read location
121 ld.ab r8, [r1,4] 112 ld.ab r8, [r1,4]
122 prefetchw [r3, 32] ;Prefetch the next write location
123 113
124 SHIFT_1 (r7, r6, 24) 114 SHIFT_1 (r7, r6, 24)
125 or r7, r7, r5 115 or r7, r7, r5
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
162 lpnz @.Lcopy8bytes_2 152 lpnz @.Lcopy8bytes_2
163 ;; LOOP START 153 ;; LOOP START
164 ld.ab r6, [r1, 4] 154 ld.ab r6, [r1, 4]
165 prefetch [r1, 28] ;Prefetch the next read location
166 ld.ab r8, [r1,4] 155 ld.ab r8, [r1,4]
167 prefetchw [r3, 32] ;Prefetch the next write location
168 156
169 SHIFT_1 (r7, r6, 16) 157 SHIFT_1 (r7, r6, 16)
170 or r7, r7, r5 158 or r7, r7, r5
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
204 lpnz @.Lcopy8bytes_3 192 lpnz @.Lcopy8bytes_3
205 ;; LOOP START 193 ;; LOOP START
206 ld.ab r6, [r1, 4] 194 ld.ab r6, [r1, 4]
207 prefetch [r1, 28] ;Prefetch the next read location
208 ld.ab r8, [r1,4] 195 ld.ab r8, [r1,4]
209 prefetchw [r3, 32] ;Prefetch the next write location
210 196
211 SHIFT_1 (r7, r6, 8) 197 SHIFT_1 (r7, r6, 8)
212 or r7, r7, r5 198 or r7, r7, r5
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 62ad4bcb841a..f230bb7092fd 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -7,11 +7,39 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/cache.h>
10 11
11#undef PREALLOC_NOT_AVAIL 12/*
13 * The memset implementation below is optimized to use prefetchw and prealloc
14 * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
15 * If you want to implement optimized memset for other possible L1 data cache
16 * line lengths (32B and 128B) you should rewrite code carefully checking
17 * we don't call any prefetchw/prealloc instruction for L1 cache lines which
18 * don't belongs to memset area.
19 */
20
21#if L1_CACHE_SHIFT == 6
22
23.macro PREALLOC_INSTR reg, off
24 prealloc [\reg, \off]
25.endm
26
27.macro PREFETCHW_INSTR reg, off
28 prefetchw [\reg, \off]
29.endm
30
31#else
32
33.macro PREALLOC_INSTR
34.endm
35
36.macro PREFETCHW_INSTR
37.endm
38
39#endif
12 40
13ENTRY_CFI(memset) 41ENTRY_CFI(memset)
14 prefetchw [r0] ; Prefetch the write location 42 PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
15 mov.f 0, r2 43 mov.f 0, r2
16;;; if size is zero 44;;; if size is zero
17 jz.d [blink] 45 jz.d [blink]
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
48 76
49 lpnz @.Lset64bytes 77 lpnz @.Lset64bytes
50 ;; LOOP START 78 ;; LOOP START
51#ifdef PREALLOC_NOT_AVAIL 79 PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
52 prefetchw [r3, 64] ;Prefetch the next write location 80
53#else
54 prealloc [r3, 64]
55#endif
56#ifdef CONFIG_ARC_HAS_LL64 81#ifdef CONFIG_ARC_HAS_LL64
57 std.ab r4, [r3, 8] 82 std.ab r4, [r3, 8]
58 std.ab r4, [r3, 8] 83 std.ab r4, [r3, 8]
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
85 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes 110 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
86 lpnz .Lset32bytes 111 lpnz .Lset32bytes
87 ;; LOOP START 112 ;; LOOP START
88 prefetchw [r3, 32] ;Prefetch the next write location
89#ifdef CONFIG_ARC_HAS_LL64 113#ifdef CONFIG_ARC_HAS_LL64
90 std.ab r4, [r3, 8] 114 std.ab r4, [r3, 8]
91 std.ab r4, [r3, 8] 115 std.ab r4, [r3, 8]
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index a1d723197084..8df1638259f3 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -141,12 +141,17 @@ good_area:
141 */ 141 */
142 fault = handle_mm_fault(vma, address, flags); 142 fault = handle_mm_fault(vma, address, flags);
143 143
144 /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
145 if (fatal_signal_pending(current)) { 144 if (fatal_signal_pending(current)) {
146 if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY)) 145
147 up_read(&mm->mmap_sem); 146 /*
148 if (user_mode(regs)) 147 * if fault retry, mmap_sem already relinquished by core mm
148 * so OK to return to user mode (with signal handled first)
149 */
150 if (fault & VM_FAULT_RETRY) {
151 if (!user_mode(regs))
152 goto no_context;
149 return; 153 return;
154 }
150 } 155 }
151 156
152 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 157 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 43bf4c3a1290..e1ab2d7f1d64 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -119,7 +119,8 @@ void __init setup_arch_memory(void)
119 */ 119 */
120 120
121 memblock_add_node(low_mem_start, low_mem_sz, 0); 121 memblock_add_node(low_mem_start, low_mem_sz, 0);
122 memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); 122 memblock_reserve(CONFIG_LINUX_LINK_BASE,
123 __pa(_end) - CONFIG_LINUX_LINK_BASE);
123 124
124#ifdef CONFIG_BLK_DEV_INITRD 125#ifdef CONFIG_BLK_DEV_INITRD
125 if (phys_initrd_size) { 126 if (phys_initrd_size) {
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index f25c085b9874..23e00216e5a5 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK
9 bool "ARC HS Development Kit SOC" 9 bool "ARC HS Development Kit SOC"
10 depends on ISA_ARCV2 10 depends on ISA_ARCV2
11 select ARC_HAS_ACCL_REGS 11 select ARC_HAS_ACCL_REGS
12 select ARC_IRQ_NO_AUTOSAVE
12 select CLK_HSDK 13 select CLK_HSDK
13 select RESET_HSDK 14 select RESET_HSDK
14 select HAVE_PCI 15 select HAVE_PCI
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 664e918e2624..26524b75970a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1400,6 +1400,7 @@ config NR_CPUS
1400config HOTPLUG_CPU 1400config HOTPLUG_CPU
1401 bool "Support for hot-pluggable CPUs" 1401 bool "Support for hot-pluggable CPUs"
1402 depends on SMP 1402 depends on SMP
1403 select GENERIC_IRQ_MIGRATION
1403 help 1404 help
1404 Say Y here to experiment with turning CPUs off and on. CPUs 1405 Say Y here to experiment with turning CPUs off and on. CPUs
1405 can be controlled through /sys/devices/system/cpu. 1406 can be controlled through /sys/devices/system/cpu.
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index b67f5fee1469..dce5be5df97b 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -729,7 +729,7 @@
729 729
730&cpsw_emac0 { 730&cpsw_emac0 {
731 phy-handle = <&ethphy0>; 731 phy-handle = <&ethphy0>;
732 phy-mode = "rgmii-txid"; 732 phy-mode = "rgmii-id";
733}; 733};
734 734
735&tscadc { 735&tscadc {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 172c0224e7f6..b128998097ce 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -651,13 +651,13 @@
651 651
652&cpsw_emac0 { 652&cpsw_emac0 {
653 phy-handle = <&ethphy0>; 653 phy-handle = <&ethphy0>;
654 phy-mode = "rgmii-txid"; 654 phy-mode = "rgmii-id";
655 dual_emac_res_vlan = <1>; 655 dual_emac_res_vlan = <1>;
656}; 656};
657 657
658&cpsw_emac1 { 658&cpsw_emac1 {
659 phy-handle = <&ethphy1>; 659 phy-handle = <&ethphy1>;
660 phy-mode = "rgmii-txid"; 660 phy-mode = "rgmii-id";
661 dual_emac_res_vlan = <2>; 661 dual_emac_res_vlan = <2>;
662}; 662};
663 663
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index d0fd68873689..5b250060f6dd 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -215,7 +215,7 @@
215 pinctrl-names = "default"; 215 pinctrl-names = "default";
216 pinctrl-0 = <&mmc1_pins>; 216 pinctrl-0 = <&mmc1_pins>;
217 bus-width = <0x4>; 217 bus-width = <0x4>;
218 cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; 218 cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
219 cd-inverted; 219 cd-inverted;
220 max-frequency = <26000000>; 220 max-frequency = <26000000>;
221 vmmc-supply = <&vmmcsd_fixed>; 221 vmmc-supply = <&vmmcsd_fixed>;
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index f3ac7483afed..5d04dc68cf57 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -144,30 +144,32 @@
144 status = "okay"; 144 status = "okay";
145 }; 145 };
146 146
147 nand@d0000 { 147 nand-controller@d0000 {
148 status = "okay"; 148 status = "okay";
149 label = "pxa3xx_nand-0";
150 num-cs = <1>;
151 marvell,nand-keep-config;
152 nand-on-flash-bbt;
153
154 partitions {
155 compatible = "fixed-partitions";
156 #address-cells = <1>;
157 #size-cells = <1>;
158
159 partition@0 {
160 label = "U-Boot";
161 reg = <0 0x800000>;
162 };
163 partition@800000 {
164 label = "Linux";
165 reg = <0x800000 0x800000>;
166 };
167 partition@1000000 {
168 label = "Filesystem";
169 reg = <0x1000000 0x3f000000>;
170 149
150 nand@0 {
151 reg = <0>;
152 label = "pxa3xx_nand-0";
153 nand-rb = <0>;
154 nand-on-flash-bbt;
155
156 partitions {
157 compatible = "fixed-partitions";
158 #address-cells = <1>;
159 #size-cells = <1>;
160
161 partition@0 {
162 label = "U-Boot";
163 reg = <0 0x800000>;
164 };
165 partition@800000 {
166 label = "Linux";
167 reg = <0x800000 0x800000>;
168 };
169 partition@1000000 {
170 label = "Filesystem";
171 reg = <0x1000000 0x3f000000>;
172 };
171 }; 173 };
172 }; 174 };
173 }; 175 };
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 1139e9469a83..b4cca507cf13 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -160,12 +160,15 @@
160 status = "okay"; 160 status = "okay";
161 }; 161 };
162 162
163 nand@d0000 { 163 nand-controller@d0000 {
164 status = "okay"; 164 status = "okay";
165 label = "pxa3xx_nand-0"; 165
166 num-cs = <1>; 166 nand@0 {
167 marvell,nand-keep-config; 167 reg = <0>;
168 nand-on-flash-bbt; 168 label = "pxa3xx_nand-0";
169 nand-rb = <0>;
170 nand-on-flash-bbt;
171 };
169 }; 172 };
170 }; 173 };
171 174
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
index bbbb38888bb8..87dcb502f72d 100644
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
@@ -81,49 +81,52 @@
81 81
82 }; 82 };
83 83
84 nand@d0000 { 84 nand-controller@d0000 {
85 status = "okay"; 85 status = "okay";
86 label = "pxa3xx_nand-0";
87 num-cs = <1>;
88 marvell,nand-keep-config;
89 nand-on-flash-bbt;
90
91 partitions {
92 compatible = "fixed-partitions";
93 #address-cells = <1>;
94 #size-cells = <1>;
95
96 partition@0 {
97 label = "u-boot";
98 reg = <0x00000000 0x000e0000>;
99 read-only;
100 };
101
102 partition@e0000 {
103 label = "u-boot-env";
104 reg = <0x000e0000 0x00020000>;
105 read-only;
106 };
107
108 partition@100000 {
109 label = "u-boot-env2";
110 reg = <0x00100000 0x00020000>;
111 read-only;
112 };
113
114 partition@120000 {
115 label = "zImage";
116 reg = <0x00120000 0x00400000>;
117 };
118
119 partition@520000 {
120 label = "initrd";
121 reg = <0x00520000 0x00400000>;
122 };
123 86
124 partition@e00000 { 87 nand@0 {
125 label = "boot"; 88 reg = <0>;
126 reg = <0x00e00000 0x3f200000>; 89 label = "pxa3xx_nand-0";
90 nand-rb = <0>;
91 nand-on-flash-bbt;
92
93 partitions {
94 compatible = "fixed-partitions";
95 #address-cells = <1>;
96 #size-cells = <1>;
97
98 partition@0 {
99 label = "u-boot";
100 reg = <0x00000000 0x000e0000>;
101 read-only;
102 };
103
104 partition@e0000 {
105 label = "u-boot-env";
106 reg = <0x000e0000 0x00020000>;
107 read-only;
108 };
109
110 partition@100000 {
111 label = "u-boot-env2";
112 reg = <0x00100000 0x00020000>;
113 read-only;
114 };
115
116 partition@120000 {
117 label = "zImage";
118 reg = <0x00120000 0x00400000>;
119 };
120
121 partition@520000 {
122 label = "initrd";
123 reg = <0x00520000 0x00400000>;
124 };
125
126 partition@e00000 {
127 label = "boot";
128 reg = <0x00e00000 0x3f200000>;
129 };
127 }; 130 };
128 }; 131 };
129 }; 132 };
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index a3c9b346721d..f04bc3e15332 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -94,6 +94,28 @@
94 regulator-boot-on; 94 regulator-boot-on;
95 }; 95 };
96 96
97 baseboard_3v3: fixedregulator-3v3 {
98 /* TPS73701DCQ */
99 compatible = "regulator-fixed";
100 regulator-name = "baseboard_3v3";
101 regulator-min-microvolt = <3300000>;
102 regulator-max-microvolt = <3300000>;
103 vin-supply = <&vbat>;
104 regulator-always-on;
105 regulator-boot-on;
106 };
107
108 baseboard_1v8: fixedregulator-1v8 {
109 /* TPS73701DCQ */
110 compatible = "regulator-fixed";
111 regulator-name = "baseboard_1v8";
112 regulator-min-microvolt = <1800000>;
113 regulator-max-microvolt = <1800000>;
114 vin-supply = <&vbat>;
115 regulator-always-on;
116 regulator-boot-on;
117 };
118
97 backlight_lcd: backlight-regulator { 119 backlight_lcd: backlight-regulator {
98 compatible = "regulator-fixed"; 120 compatible = "regulator-fixed";
99 regulator-name = "lcd_backlight_pwr"; 121 regulator-name = "lcd_backlight_pwr";
@@ -105,7 +127,7 @@
105 127
106 sound { 128 sound {
107 compatible = "simple-audio-card"; 129 compatible = "simple-audio-card";
108 simple-audio-card,name = "DA850/OMAP-L138 EVM"; 130 simple-audio-card,name = "DA850-OMAPL138 EVM";
109 simple-audio-card,widgets = 131 simple-audio-card,widgets =
110 "Line", "Line In", 132 "Line", "Line In",
111 "Line", "Line Out"; 133 "Line", "Line Out";
@@ -210,10 +232,9 @@
210 232
211 /* Regulators */ 233 /* Regulators */
212 IOVDD-supply = <&vdcdc2_reg>; 234 IOVDD-supply = <&vdcdc2_reg>;
213 /* Derived from VBAT: Baseboard 3.3V / 1.8V */ 235 AVDD-supply = <&baseboard_3v3>;
214 AVDD-supply = <&vbat>; 236 DRVDD-supply = <&baseboard_3v3>;
215 DRVDD-supply = <&vbat>; 237 DVDD-supply = <&baseboard_1v8>;
216 DVDD-supply = <&vbat>;
217 }; 238 };
218 tca6416: gpio@20 { 239 tca6416: gpio@20 {
219 compatible = "ti,tca6416"; 240 compatible = "ti,tca6416";
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index 0177e3ed20fe..3a2fa6e035a3 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -39,9 +39,39 @@
39 }; 39 };
40 }; 40 };
41 41
42 vcc_5vd: fixedregulator-vcc_5vd {
43 compatible = "regulator-fixed";
44 regulator-name = "vcc_5vd";
45 regulator-min-microvolt = <5000000>;
46 regulator-max-microvolt = <5000000>;
47 regulator-boot-on;
48 };
49
50 vcc_3v3d: fixedregulator-vcc_3v3d {
51 /* TPS650250 - VDCDC1 */
52 compatible = "regulator-fixed";
53 regulator-name = "vcc_3v3d";
54 regulator-min-microvolt = <3300000>;
55 regulator-max-microvolt = <3300000>;
56 vin-supply = <&vcc_5vd>;
57 regulator-always-on;
58 regulator-boot-on;
59 };
60
61 vcc_1v8d: fixedregulator-vcc_1v8d {
62 /* TPS650250 - VDCDC2 */
63 compatible = "regulator-fixed";
64 regulator-name = "vcc_1v8d";
65 regulator-min-microvolt = <1800000>;
66 regulator-max-microvolt = <1800000>;
67 vin-supply = <&vcc_5vd>;
68 regulator-always-on;
69 regulator-boot-on;
70 };
71
42 sound { 72 sound {
43 compatible = "simple-audio-card"; 73 compatible = "simple-audio-card";
44 simple-audio-card,name = "DA850/OMAP-L138 LCDK"; 74 simple-audio-card,name = "DA850-OMAPL138 LCDK";
45 simple-audio-card,widgets = 75 simple-audio-card,widgets =
46 "Line", "Line In", 76 "Line", "Line In",
47 "Line", "Line Out"; 77 "Line", "Line Out";
@@ -221,6 +251,12 @@
221 compatible = "ti,tlv320aic3106"; 251 compatible = "ti,tlv320aic3106";
222 reg = <0x18>; 252 reg = <0x18>;
223 status = "okay"; 253 status = "okay";
254
255 /* Regulators */
256 IOVDD-supply = <&vcc_3v3d>;
257 AVDD-supply = <&vcc_3v3d>;
258 DRVDD-supply = <&vcc_3v3d>;
259 DVDD-supply = <&vcc_1v8d>;
224 }; 260 };
225}; 261};
226 262
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 47aa53ba6b92..559659b399d0 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -476,7 +476,7 @@
476 clocksource: timer@20000 { 476 clocksource: timer@20000 {
477 compatible = "ti,da830-timer"; 477 compatible = "ti,da830-timer";
478 reg = <0x20000 0x1000>; 478 reg = <0x20000 0x1000>;
479 interrupts = <12>, <13>; 479 interrupts = <21>, <22>;
480 interrupt-names = "tint12", "tint34"; 480 interrupt-names = "tint12", "tint34";
481 clocks = <&pll0_auxclk>; 481 clocks = <&pll0_auxclk>;
482 }; 482 };
diff --git a/arch/arm/boot/dts/imx6q-pistachio.dts b/arch/arm/boot/dts/imx6q-pistachio.dts
index 5edf858c8b86..a31b17eaf51c 100644
--- a/arch/arm/boot/dts/imx6q-pistachio.dts
+++ b/arch/arm/boot/dts/imx6q-pistachio.dts
@@ -103,7 +103,7 @@
103 power { 103 power {
104 label = "Power Button"; 104 label = "Power Button";
105 gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; 105 gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
106 gpio-key,wakeup; 106 wakeup-source;
107 linux,code = <KEY_POWER>; 107 linux,code = <KEY_POWER>;
108 }; 108 };
109 }; 109 };
diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts
index d8163705363e..4a31a415f88e 100644
--- a/arch/arm/boot/dts/imx6sll-evk.dts
+++ b/arch/arm/boot/dts/imx6sll-evk.dts
@@ -309,7 +309,7 @@
309 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 309 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
310 cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; 310 cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
311 keep-power-in-suspend; 311 keep-power-in-suspend;
312 enable-sdio-wakeup; 312 wakeup-source;
313 vmmc-supply = <&reg_sd3_vmmc>; 313 vmmc-supply = <&reg_sd3_vmmc>;
314 status = "okay"; 314 status = "okay";
315}; 315};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 272ff6133ec1..d1375d3650fd 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -467,7 +467,7 @@
467 }; 467 };
468 468
469 gpt: gpt@2098000 { 469 gpt: gpt@2098000 {
470 compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt"; 470 compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
471 reg = <0x02098000 0x4000>; 471 reg = <0x02098000 0x4000>;
472 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; 472 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
473 clocks = <&clks IMX6SX_CLK_GPT_BUS>, 473 clocks = <&clks IMX6SX_CLK_GPT_BUS>,
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index cbaf06f2f78e..eb917462b219 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -36,8 +36,8 @@
36 compatible = "gpio-fan"; 36 compatible = "gpio-fan";
37 pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>; 37 pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
38 pinctrl-names = "default"; 38 pinctrl-names = "default";
39 gpios = <&gpio1 14 GPIO_ACTIVE_LOW 39 gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
40 &gpio1 13 GPIO_ACTIVE_LOW>; 40 &gpio1 13 GPIO_ACTIVE_HIGH>;
41 gpio-fan,speed-map = <0 0 41 gpio-fan,speed-map = <0 0
42 3000 1 42 3000 1
43 6000 2>; 43 6000 2>;
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index e4645f612712..2ab74860d962 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -274,7 +274,7 @@
274 compatible = "amlogic,meson6-dwmac", "snps,dwmac"; 274 compatible = "amlogic,meson6-dwmac", "snps,dwmac";
275 reg = <0xc9410000 0x10000 275 reg = <0xc9410000 0x10000
276 0xc1108108 0x4>; 276 0xc1108108 0x4>;
277 interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>; 277 interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
278 interrupt-names = "macirq"; 278 interrupt-names = "macirq";
279 status = "disabled"; 279 status = "disabled";
280 }; 280 };
diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
index 0872f6e3abf5..d50fc2f60fa3 100644
--- a/arch/arm/boot/dts/meson8b-ec100.dts
+++ b/arch/arm/boot/dts/meson8b-ec100.dts
@@ -205,8 +205,7 @@
205 cap-sd-highspeed; 205 cap-sd-highspeed;
206 disable-wp; 206 disable-wp;
207 207
208 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 208 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
209 cd-inverted;
210 209
211 vmmc-supply = <&vcc_3v3>; 210 vmmc-supply = <&vcc_3v3>;
212 }; 211 };
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
index 58669abda259..0f0a46ddf3ff 100644
--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
@@ -221,7 +221,6 @@
221 /* Realtek RTL8211F (0x001cc916) */ 221 /* Realtek RTL8211F (0x001cc916) */
222 eth_phy: ethernet-phy@0 { 222 eth_phy: ethernet-phy@0 {
223 reg = <0>; 223 reg = <0>;
224 eee-broken-1000t;
225 interrupt-parent = <&gpio_intc>; 224 interrupt-parent = <&gpio_intc>;
226 /* GPIOH_3 */ 225 /* GPIOH_3 */
227 interrupts = <17 IRQ_TYPE_LEVEL_LOW>; 226 interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@@ -273,8 +272,7 @@
273 cap-sd-highspeed; 272 cap-sd-highspeed;
274 disable-wp; 273 disable-wp;
275 274
276 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 275 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
277 cd-inverted;
278 276
279 vmmc-supply = <&tflash_vdd>; 277 vmmc-supply = <&tflash_vdd>;
280 vqmmc-supply = <&tf_io>; 278 vqmmc-supply = <&tf_io>;
diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
index f5853610b20b..6ac02beb5fa7 100644
--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
@@ -206,8 +206,7 @@
206 cap-sd-highspeed; 206 cap-sd-highspeed;
207 disable-wp; 207 disable-wp;
208 208
209 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 209 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
210 cd-inverted;
211 210
212 vmmc-supply = <&vcc_3v3>; 211 vmmc-supply = <&vcc_3v3>;
213 }; 212 };
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index ddc7a7bb33c0..f57acf8f66b9 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -105,7 +105,7 @@
105 interrupts-extended = < 105 interrupts-extended = <
106 &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 106 &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
107 &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 107 &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
108 &cpcap 48 1 108 &cpcap 48 0
109 >; 109 >;
110 interrupt-names = 110 interrupt-names =
111 "id_ground", "id_float", "se0conn", "vbusvld", 111 "id_ground", "id_float", "se0conn", "vbusvld",
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index e53d32691308..93b420934e8e 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -714,11 +714,7 @@
714 714
715 vdda-supply = <&vdac>; 715 vdda-supply = <&vdac>;
716 716
717 #address-cells = <1>;
718 #size-cells = <0>;
719
720 port { 717 port {
721 reg = <0>;
722 venc_out: endpoint { 718 venc_out: endpoint {
723 remote-endpoint = <&opa_in>; 719 remote-endpoint = <&opa_in>;
724 ti,channels = <1>; 720 ti,channels = <1>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 182a53991c90..826920e6b878 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -814,7 +814,7 @@
814 /* For debugging, it is often good idea to remove this GPIO. 814 /* For debugging, it is often good idea to remove this GPIO.
815 It means you can remove back cover (to reboot by removing 815 It means you can remove back cover (to reboot by removing
816 battery) and still use the MMC card. */ 816 battery) and still use the MMC card. */
817 cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ 817 cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
818}; 818};
819 819
820/* most boards use vaux3, only some old versions use vmmc2 instead */ 820/* most boards use vaux3, only some old versions use vmmc2 instead */
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 0d9b85317529..e142e6c70a59 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -370,6 +370,19 @@
370 compatible = "ti,omap2-onenand"; 370 compatible = "ti,omap2-onenand";
371 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ 371 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
372 372
373 /*
374 * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
375 * bootloader set values when booted with v4.19 using both N950
376 * and N9 devices (OneNAND Manufacturer: Samsung):
377 *
378 * gpmc cs0 before gpmc_cs_program_settings:
379 * cs0 GPMC_CS_CONFIG1: 0xfd001202
380 * cs0 GPMC_CS_CONFIG2: 0x00181800
381 * cs0 GPMC_CS_CONFIG3: 0x00030300
382 * cs0 GPMC_CS_CONFIG4: 0x18001804
383 * cs0 GPMC_CS_CONFIG5: 0x03171d1d
384 * cs0 GPMC_CS_CONFIG6: 0x97080000
385 */
373 gpmc,sync-read; 386 gpmc,sync-read;
374 gpmc,sync-write; 387 gpmc,sync-write;
375 gpmc,burst-length = <16>; 388 gpmc,burst-length = <16>;
@@ -379,26 +392,27 @@
379 gpmc,device-width = <2>; 392 gpmc,device-width = <2>;
380 gpmc,mux-add-data = <2>; 393 gpmc,mux-add-data = <2>;
381 gpmc,cs-on-ns = <0>; 394 gpmc,cs-on-ns = <0>;
382 gpmc,cs-rd-off-ns = <87>; 395 gpmc,cs-rd-off-ns = <122>;
383 gpmc,cs-wr-off-ns = <87>; 396 gpmc,cs-wr-off-ns = <122>;
384 gpmc,adv-on-ns = <0>; 397 gpmc,adv-on-ns = <0>;
385 gpmc,adv-rd-off-ns = <10>; 398 gpmc,adv-rd-off-ns = <15>;
386 gpmc,adv-wr-off-ns = <10>; 399 gpmc,adv-wr-off-ns = <15>;
387 gpmc,oe-on-ns = <15>; 400 gpmc,oe-on-ns = <20>;
388 gpmc,oe-off-ns = <87>; 401 gpmc,oe-off-ns = <122>;
389 gpmc,we-on-ns = <0>; 402 gpmc,we-on-ns = <0>;
390 gpmc,we-off-ns = <87>; 403 gpmc,we-off-ns = <122>;
391 gpmc,rd-cycle-ns = <112>; 404 gpmc,rd-cycle-ns = <148>;
392 gpmc,wr-cycle-ns = <112>; 405 gpmc,wr-cycle-ns = <148>;
393 gpmc,access-ns = <81>; 406 gpmc,access-ns = <117>;
394 gpmc,page-burst-access-ns = <15>; 407 gpmc,page-burst-access-ns = <15>;
395 gpmc,bus-turnaround-ns = <0>; 408 gpmc,bus-turnaround-ns = <0>;
396 gpmc,cycle2cycle-delay-ns = <0>; 409 gpmc,cycle2cycle-delay-ns = <0>;
397 gpmc,wait-monitoring-ns = <0>; 410 gpmc,wait-monitoring-ns = <0>;
398 gpmc,clk-activation-ns = <5>; 411 gpmc,clk-activation-ns = <10>;
399 gpmc,wr-data-mux-bus-ns = <30>; 412 gpmc,wr-data-mux-bus-ns = <40>;
400 gpmc,wr-access-ns = <81>; 413 gpmc,wr-access-ns = <117>;
401 gpmc,sync-clk-ps = <15000>; 414
415 gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */
402 416
403 /* 417 /*
404 * MTD partition table corresponding to Nokia's MeeGo 1.2 418 * MTD partition table corresponding to Nokia's MeeGo 1.2
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 04758a2a87f0..67d77eee9433 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -644,6 +644,17 @@
644 }; 644 };
645}; 645};
646 646
647/* Configure pwm clock source for timers 8 & 9 */
648&timer8 {
649 assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
650 assigned-clock-parents = <&sys_clkin_ck>;
651};
652
653&timer9 {
654 assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
655 assigned-clock-parents = <&sys_clkin_ck>;
656};
657
647/* 658/*
648 * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for 659 * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
649 * uart1 wakeirq. 660 * uart1 wakeirq.
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index bc853ebeda22..61a06f6add3c 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -317,7 +317,8 @@
317 317
318 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { 318 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
319 pinctrl-single,pins = < 319 pinctrl-single,pins = <
320 OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ 320 /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
321 OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
321 >; 322 >;
322 }; 323 };
323 324
@@ -385,7 +386,8 @@
385 386
386 palmas: palmas@48 { 387 palmas: palmas@48 {
387 compatible = "ti,palmas"; 388 compatible = "ti,palmas";
388 interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ 389 /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
390 interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
389 reg = <0x48>; 391 reg = <0x48>;
390 interrupt-controller; 392 interrupt-controller;
391 #interrupt-cells = <2>; 393 #interrupt-cells = <2>;
@@ -651,7 +653,8 @@
651 pinctrl-names = "default"; 653 pinctrl-names = "default";
652 pinctrl-0 = <&twl6040_pins>; 654 pinctrl-0 = <&twl6040_pins>;
653 655
654 interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ 656 /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
657 interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
655 658
656 /* audpwron gpio defined in the board specific dts */ 659 /* audpwron gpio defined in the board specific dts */
657 660
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 5e21fb430a65..e78d3718f145 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -181,6 +181,13 @@
181 OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ 181 OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */
182 >; 182 >;
183 }; 183 };
184
185 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
186 pinctrl-single,pins = <
187 /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
188 OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
189 >;
190 };
184}; 191};
185 192
186&omap5_pmx_core { 193&omap5_pmx_core {
@@ -414,8 +421,11 @@
414 421
415 palmas: palmas@48 { 422 palmas: palmas@48 {
416 compatible = "ti,palmas"; 423 compatible = "ti,palmas";
417 interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
418 reg = <0x48>; 424 reg = <0x48>;
425 pinctrl-0 = <&palmas_sys_nirq_pins>;
426 pinctrl-names = "default";
427 /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
428 interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
419 interrupt-controller; 429 interrupt-controller;
420 #interrupt-cells = <2>; 430 #interrupt-cells = <2>;
421 ti,system-power-controller; 431 ti,system-power-controller;
diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi
index 9c7e309d9c2c..0960348002ad 100644
--- a/arch/arm/boot/dts/omap5-l4.dtsi
+++ b/arch/arm/boot/dts/omap5-l4.dtsi
@@ -1046,8 +1046,6 @@
1046 <SYSC_IDLE_SMART>, 1046 <SYSC_IDLE_SMART>,
1047 <SYSC_IDLE_SMART_WKUP>; 1047 <SYSC_IDLE_SMART_WKUP>;
1048 ti,syss-mask = <1>; 1048 ti,syss-mask = <1>;
1049 ti,no-reset-on-init;
1050 ti,no-idle-on-init;
1051 /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */ 1049 /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */
1052 clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>; 1050 clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>;
1053 clock-names = "fck"; 1051 clock-names = "fck";
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
index 3cc33f7ff7fe..3adc158a40bb 100644
--- a/arch/arm/boot/dts/r8a7743.dtsi
+++ b/arch/arm/boot/dts/r8a7743.dtsi
@@ -1681,15 +1681,12 @@
1681 1681
1682 du: display@feb00000 { 1682 du: display@feb00000 {
1683 compatible = "renesas,du-r8a7743"; 1683 compatible = "renesas,du-r8a7743";
1684 reg = <0 0xfeb00000 0 0x40000>, 1684 reg = <0 0xfeb00000 0 0x40000>;
1685 <0 0xfeb90000 0 0x1c>;
1686 reg-names = "du", "lvds.0";
1687 interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, 1685 interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
1688 <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; 1686 <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
1689 clocks = <&cpg CPG_MOD 724>, 1687 clocks = <&cpg CPG_MOD 724>,
1690 <&cpg CPG_MOD 723>, 1688 <&cpg CPG_MOD 723>;
1691 <&cpg CPG_MOD 726>; 1689 clock-names = "du.0", "du.1";
1692 clock-names = "du.0", "du.1", "lvds.0";
1693 status = "disabled"; 1690 status = "disabled";
1694 1691
1695 ports { 1692 ports {
@@ -1704,6 +1701,33 @@
1704 port@1 { 1701 port@1 {
1705 reg = <1>; 1702 reg = <1>;
1706 du_out_lvds0: endpoint { 1703 du_out_lvds0: endpoint {
1704 remote-endpoint = <&lvds0_in>;
1705 };
1706 };
1707 };
1708 };
1709
1710 lvds0: lvds@feb90000 {
1711 compatible = "renesas,r8a7743-lvds";
1712 reg = <0 0xfeb90000 0 0x1c>;
1713 clocks = <&cpg CPG_MOD 726>;
1714 power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
1715 resets = <&cpg 726>;
1716 status = "disabled";
1717
1718 ports {
1719 #address-cells = <1>;
1720 #size-cells = <0>;
1721
1722 port@0 {
1723 reg = <0>;
1724 lvds0_in: endpoint {
1725 remote-endpoint = <&du_out_lvds0>;
1726 };
1727 };
1728 port@1 {
1729 reg = <1>;
1730 lvds0_out: endpoint {
1707 }; 1731 };
1708 }; 1732 };
1709 }; 1733 };
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 4acb501dd3f8..3ed49898f4b2 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -719,7 +719,6 @@
719 pm_qos = <&qos_lcdc0>, 719 pm_qos = <&qos_lcdc0>,
720 <&qos_lcdc1>, 720 <&qos_lcdc1>,
721 <&qos_cif0>, 721 <&qos_cif0>,
722 <&qos_cif1>,
723 <&qos_ipp>, 722 <&qos_ipp>,
724 <&qos_rga>; 723 <&qos_rga>;
725 }; 724 };
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 353d90f99b40..13304b8c5139 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -216,6 +216,7 @@
216 #clock-cells = <0>; 216 #clock-cells = <0>;
217 compatible = "fixed-clock"; 217 compatible = "fixed-clock";
218 clock-frequency = <24000000>; 218 clock-frequency = <24000000>;
219 clock-output-names = "osc24M";
219 }; 220 };
220 221
221 osc32k: clk-32k { 222 osc32k: clk-32k {
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index 5d23667dc2d2..25540b7694d5 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -53,7 +53,7 @@
53 53
54 aliases { 54 aliases {
55 serial0 = &uart0; 55 serial0 = &uart0;
56 /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */ 56 ethernet0 = &emac;
57 ethernet1 = &sdiowifi; 57 ethernet1 = &sdiowifi;
58 }; 58 };
59 59
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index d5f11d6d987e..bc85b6a166c7 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -13,10 +13,25 @@
13 stdout-path = "serial0:115200n8"; 13 stdout-path = "serial0:115200n8";
14 }; 14 };
15 15
16 memory@80000000 { 16 /*
17 * Note that recent version of the device tree compiler (starting with
18 * version 1.4.2) warn about this node containing a reg property, but
19 * missing a unit-address. However, the bootloader on these Chromebook
20 * devices relies on the full name of this node to be exactly /memory.
21 * Adding the unit-address causes the bootloader to create a /memory
22 * node and write the memory bank configuration to that node, which in
23 * turn leads the kernel to believe that the device has 2 GiB of
24 * memory instead of the amount detected by the bootloader.
25 *
26 * The name of this node is effectively ABI and must not be changed.
27 */
28 memory {
29 device_type = "memory";
17 reg = <0x0 0x80000000 0x0 0x80000000>; 30 reg = <0x0 0x80000000 0x0 0x80000000>;
18 }; 31 };
19 32
33 /delete-node/ memory@80000000;
34
20 host1x@50000000 { 35 host1x@50000000 {
21 hdmi@54280000 { 36 hdmi@54280000 {
22 status = "okay"; 37 status = "okay";
diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts
index 689c8930dce3..b08d561d6748 100644
--- a/arch/arm/boot/dts/vf610-bk4.dts
+++ b/arch/arm/boot/dts/vf610-bk4.dts
@@ -110,11 +110,11 @@
110 bus-num = <3>; 110 bus-num = <3>;
111 status = "okay"; 111 status = "okay";
112 spi-slave; 112 spi-slave;
113 #address-cells = <0>;
113 114
114 slave@0 { 115 slave {
115 compatible = "lwn,bk4"; 116 compatible = "lwn,bk4";
116 spi-max-frequency = <30000000>; 117 spi-max-frequency = <30000000>;
117 reg = <0>;
118 }; 118 };
119}; 119};
120 120
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index c883fcbe93b6..46d41140df27 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -25,7 +25,6 @@
25#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
26struct irqaction; 26struct irqaction;
27struct pt_regs; 27struct pt_regs;
28extern void migrate_irqs(void);
29 28
30extern void asm_do_IRQ(unsigned int, struct pt_regs *); 29extern void asm_do_IRQ(unsigned int, struct pt_regs *);
31void handle_IRQ(unsigned int, struct pt_regs *); 30void handle_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index ca56537b61bc..50e89869178a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
48#define KVM_REQ_SLEEP \ 48#define KVM_REQ_SLEEP \
49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) 50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
51#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
51 52
52DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); 53DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
53 54
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
147 148
148typedef struct kvm_cpu_context kvm_cpu_context_t; 149typedef struct kvm_cpu_context kvm_cpu_context_t;
149 150
151struct vcpu_reset_state {
152 unsigned long pc;
153 unsigned long r0;
154 bool be;
155 bool reset;
156};
157
150struct kvm_vcpu_arch { 158struct kvm_vcpu_arch {
151 struct kvm_cpu_context ctxt; 159 struct kvm_cpu_context ctxt;
152 160
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
186 /* Cache some mmu pages needed inside spinlock regions */ 194 /* Cache some mmu pages needed inside spinlock regions */
187 struct kvm_mmu_memory_cache mmu_page_cache; 195 struct kvm_mmu_memory_cache mmu_page_cache;
188 196
197 struct vcpu_reset_state reset_state;
198
189 /* Detect first run of a vcpu */ 199 /* Detect first run of a vcpu */
190 bool has_run_once; 200 bool has_run_once;
191}; 201};
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index c4b1d4fb1797..de2089501b8b 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
76#define S2_PMD_MASK PMD_MASK 76#define S2_PMD_MASK PMD_MASK
77#define S2_PMD_SIZE PMD_SIZE 77#define S2_PMD_SIZE PMD_SIZE
78 78
79static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
80{
81 return true;
82}
83
79#endif /* __ARM_S2_PGTABLE_H_ */ 84#endif /* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index b3ef061d8b74..2c403e7c782d 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1 +1,95 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
3#define _ASM_ARM_XEN_PAGE_COHERENT_H
4
5#include <linux/dma-mapping.h>
6#include <asm/page.h>
1#include <xen/arm/page-coherent.h> 7#include <xen/arm/page-coherent.h>
8
9static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
10{
11 if (dev && dev->archdata.dev_dma_ops)
12 return dev->archdata.dev_dma_ops;
13 return get_arch_dma_ops(NULL);
14}
15
16static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
17 dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
18{
19 return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
20}
21
22static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
23 void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
24{
25 xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
26}
27
28static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
29 dma_addr_t dev_addr, unsigned long offset, size_t size,
30 enum dma_data_direction dir, unsigned long attrs)
31{
32 unsigned long page_pfn = page_to_xen_pfn(page);
33 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
34 unsigned long compound_pages =
35 (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
36 bool local = (page_pfn <= dev_pfn) &&
37 (dev_pfn - page_pfn < compound_pages);
38
39 /*
40 * Dom0 is mapped 1:1, while the Linux page can span across
41 * multiple Xen pages, it's not possible for it to contain a
42 * mix of local and foreign Xen pages. So if the first xen_pfn
43 * == mfn the page is local otherwise it's a foreign page
44 * grant-mapped in dom0. If the page is local we can safely
45 * call the native dma_ops function, otherwise we call the xen
46 * specific function.
47 */
48 if (local)
49 xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
50 else
51 __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
52}
53
54static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
55 size_t size, enum dma_data_direction dir, unsigned long attrs)
56{
57 unsigned long pfn = PFN_DOWN(handle);
58 /*
59 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
60 * multiple Xen page, it's not possible to have a mix of local and
61 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
62 * foreign mfn will always return false. If the page is local we can
63 * safely call the native dma_ops function, otherwise we call the xen
64 * specific function.
65 */
66 if (pfn_valid(pfn)) {
67 if (xen_get_dma_ops(hwdev)->unmap_page)
68 xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
69 } else
70 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
71}
72
73static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
74 dma_addr_t handle, size_t size, enum dma_data_direction dir)
75{
76 unsigned long pfn = PFN_DOWN(handle);
77 if (pfn_valid(pfn)) {
78 if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
79 xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
80 } else
81 __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
82}
83
84static inline void xen_dma_sync_single_for_device(struct device *hwdev,
85 dma_addr_t handle, size_t size, enum dma_data_direction dir)
86{
87 unsigned long pfn = PFN_DOWN(handle);
88 if (pfn_valid(pfn)) {
89 if (xen_get_dma_ops(hwdev)->sync_single_for_device)
90 xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
91 } else
92 __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
93}
94
95#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9908dacf9229..844861368cd5 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
31#include <linux/smp.h> 31#include <linux/smp.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/ratelimit.h>
35#include <linux/errno.h> 34#include <linux/errno.h>
36#include <linux/list.h> 35#include <linux/list.h>
37#include <linux/kallsyms.h> 36#include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
109 return nr_irqs; 108 return nr_irqs;
110} 109}
111#endif 110#endif
112
113#ifdef CONFIG_HOTPLUG_CPU
114static bool migrate_one_irq(struct irq_desc *desc)
115{
116 struct irq_data *d = irq_desc_get_irq_data(desc);
117 const struct cpumask *affinity = irq_data_get_affinity_mask(d);
118 struct irq_chip *c;
119 bool ret = false;
120
121 /*
122 * If this is a per-CPU interrupt, or the affinity does not
123 * include this CPU, then we have nothing to do.
124 */
125 if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
126 return false;
127
128 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
129 affinity = cpu_online_mask;
130 ret = true;
131 }
132
133 c = irq_data_get_irq_chip(d);
134 if (!c->irq_set_affinity)
135 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
136 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
137 cpumask_copy(irq_data_get_affinity_mask(d), affinity);
138
139 return ret;
140}
141
142/*
143 * The current CPU has been marked offline. Migrate IRQs off this CPU.
144 * If the affinity settings do not allow other CPUs, force them onto any
145 * available CPU.
146 *
147 * Note: we must iterate over all IRQs, whether they have an attached
148 * action structure or not, as we need to get chained interrupts too.
149 */
150void migrate_irqs(void)
151{
152 unsigned int i;
153 struct irq_desc *desc;
154 unsigned long flags;
155
156 local_irq_save(flags);
157
158 for_each_irq_desc(i, desc) {
159 bool affinity_broken;
160
161 raw_spin_lock(&desc->lock);
162 affinity_broken = migrate_one_irq(desc);
163 raw_spin_unlock(&desc->lock);
164
165 if (affinity_broken)
166 pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
167 i, smp_processor_id());
168 }
169
170 local_irq_restore(flags);
171}
172#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3bf82232b1be..1d6f5ea522f4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -254,7 +254,7 @@ int __cpu_disable(void)
254 /* 254 /*
255 * OK - migrate IRQs away from this CPU 255 * OK - migrate IRQs away from this CPU
256 */ 256 */
257 migrate_irqs(); 257 irq_migrate_all_off_this_cpu();
258 258
259 /* 259 /*
260 * Flush user cache and TLB mappings, and then remove this CPU 260 * Flush user cache and TLB mappings, and then remove this CPU
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 222c1635bc7a..e8bd288fd5be 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
1450 reset_coproc_regs(vcpu, table, num); 1450 reset_coproc_regs(vcpu, table, num);
1451 1451
1452 for (num = 1; num < NR_CP15_REGS; num++) 1452 for (num = 1; num < NR_CP15_REGS; num++)
1453 if (vcpu_cp15(vcpu, num) == 0x42424242) 1453 WARN(vcpu_cp15(vcpu, num) == 0x42424242,
1454 panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); 1454 "Didn't reset vcpu_cp15(vcpu, %zi)", num);
1455} 1455}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 5ed0c3ee33d6..e53327912adc 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -26,6 +26,7 @@
26#include <asm/cputype.h> 26#include <asm/cputype.h>
27#include <asm/kvm_arm.h> 27#include <asm/kvm_arm.h>
28#include <asm/kvm_coproc.h> 28#include <asm/kvm_coproc.h>
29#include <asm/kvm_emulate.h>
29 30
30#include <kvm/arm_arch_timer.h> 31#include <kvm/arm_arch_timer.h>
31 32
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
69 /* Reset CP15 registers */ 70 /* Reset CP15 registers */
70 kvm_reset_coprocs(vcpu); 71 kvm_reset_coprocs(vcpu);
71 72
73 /*
74 * Additional reset state handling that PSCI may have imposed on us.
75 * Must be done after all the sys_reg reset.
76 */
77 if (READ_ONCE(vcpu->arch.reset_state.reset)) {
78 unsigned long target_pc = vcpu->arch.reset_state.pc;
79
80 /* Gracefully handle Thumb2 entry point */
81 if (target_pc & 1) {
82 target_pc &= ~1UL;
83 vcpu_set_thumb(vcpu);
84 }
85
86 /* Propagate caller endianness */
87 if (vcpu->arch.reset_state.be)
88 kvm_vcpu_set_be(vcpu);
89
90 *vcpu_pc(vcpu) = target_pc;
91 vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
92
93 vcpu->arch.reset_state.reset = false;
94 }
95
72 /* Reset arch_timer context */ 96 /* Reset arch_timer context */
73 return kvm_timer_vcpu_reset(vcpu); 97 return kvm_timer_vcpu_reset(vcpu);
74} 98}
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a..95a11d5b3587 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
83 } else /* remote PCI bus */ 83 } else /* remote PCI bus */
84 base = cnspci->cfg1_regs + ((busno & 0xf) << 20); 84 base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
85 85
86 return base + (where & 0xffc) + (devfn << 12); 86 return base + where + (devfn << 12);
87} 87}
88 88
89static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, 89static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
@@ -93,7 +93,7 @@ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
93 u32 mask = (0x1ull << (size * 8)) - 1; 93 u32 mask = (0x1ull << (size * 8)) - 1;
94 int shift = (where % 4) * 8; 94 int shift = (where % 4) * 8;
95 95
96 ret = pci_generic_config_read32(bus, devfn, where, size, val); 96 ret = pci_generic_config_read(bus, devfn, where, size, val);
97 97
98 if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn && 98 if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
99 (where & 0xffc) == PCI_CLASS_REVISION) 99 (where & 0xffc) == PCI_CLASS_REVISION)
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index e52ec1619b70..c4da635ee4ce 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -208,9 +208,9 @@ static struct gpiod_lookup_table mmc_gpios_table = {
208 .dev_id = "da830-mmc.0", 208 .dev_id = "da830-mmc.0",
209 .table = { 209 .table = {
210 /* gpio chip 1 contains gpio range 32-63 */ 210 /* gpio chip 1 contains gpio range 32-63 */
211 GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd", 211 GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_CD_PIN, "cd",
212 GPIO_ACTIVE_LOW), 212 GPIO_ACTIVE_LOW),
213 GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp", 213 GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_WP_PIN, "wp",
214 GPIO_ACTIVE_LOW), 214 GPIO_ACTIVE_LOW),
215 }, 215 },
216}; 216};
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 6a29baf0a289..44bca048dfd0 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -805,9 +805,9 @@ static struct gpiod_lookup_table mmc_gpios_table = {
805 .dev_id = "da830-mmc.0", 805 .dev_id = "da830-mmc.0",
806 .table = { 806 .table = {
807 /* gpio chip 2 contains gpio range 64-95 */ 807 /* gpio chip 2 contains gpio range 64-95 */
808 GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", 808 GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_CD_PIN, "cd",
809 GPIO_ACTIVE_LOW), 809 GPIO_ACTIVE_LOW),
810 GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", 810 GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_WP_PIN, "wp",
811 GPIO_ACTIVE_HIGH), 811 GPIO_ACTIVE_HIGH),
812 }, 812 },
813}; 813};
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index f53a461a606f..f7fa960c23e3 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -117,9 +117,9 @@ static struct platform_device davinci_nand_device = {
117static struct gpiod_lookup_table i2c_recovery_gpiod_table = { 117static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
118 .dev_id = "i2c_davinci.1", 118 .dev_id = "i2c_davinci.1",
119 .table = { 119 .table = {
120 GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SDA_PIN, "sda", 120 GPIO_LOOKUP("davinci_gpio", DM355_I2C_SDA_PIN, "sda",
121 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 121 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
122 GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SCL_PIN, "scl", 122 GPIO_LOOKUP("davinci_gpio", DM355_I2C_SCL_PIN, "scl",
123 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 123 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
124 }, 124 },
125}; 125};
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index e1428115067f..b80c4ee76217 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -660,9 +660,9 @@ static struct i2c_board_info __initdata i2c_info[] = {
660static struct gpiod_lookup_table i2c_recovery_gpiod_table = { 660static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
661 .dev_id = "i2c_davinci.1", 661 .dev_id = "i2c_davinci.1",
662 .table = { 662 .table = {
663 GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SDA_PIN, "sda", 663 GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SDA_PIN, "sda",
664 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 664 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
665 GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SCL_PIN, "scl", 665 GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SCL_PIN, "scl",
666 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 666 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
667 }, 667 },
668}; 668};
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 8e8d51f4a276..94c4f126ef86 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -134,9 +134,9 @@ static const short hawk_mmcsd0_pins[] = {
134static struct gpiod_lookup_table mmc_gpios_table = { 134static struct gpiod_lookup_table mmc_gpios_table = {
135 .dev_id = "da830-mmc.0", 135 .dev_id = "da830-mmc.0",
136 .table = { 136 .table = {
137 GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd", 137 GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_CD_PIN, "cd",
138 GPIO_ACTIVE_LOW), 138 GPIO_ACTIVE_LOW),
139 GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp", 139 GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_WP_PIN, "wp",
140 GPIO_ACTIVE_LOW), 140 GPIO_ACTIVE_LOW),
141 }, 141 },
142}; 142};
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index a109f6482413..8dfad012dfae 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -390,10 +390,14 @@ static int __ref impd1_probe(struct lm_device *dev)
390 char *mmciname; 390 char *mmciname;
391 391
392 lookup = devm_kzalloc(&dev->dev, 392 lookup = devm_kzalloc(&dev->dev,
393 sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup), 393 struct_size(lookup, table, 3),
394 GFP_KERNEL); 394 GFP_KERNEL);
395 chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL); 395 chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
396 mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id); 396 mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
397 "lm%x:00700", dev->id);
398 if (!lookup || !chipname || !mmciname)
399 return -ENOMEM;
400
397 lookup->dev_id = mmciname; 401 lookup->dev_id = mmciname;
398 /* 402 /*
399 * Offsets on GPIO block 1: 403 * Offsets on GPIO block 1:
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 3b73813c6b04..23e8c93515d4 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
75/* 75/*
76 * N2100 PCI. 76 * N2100 PCI.
77 */ 77 */
78static int __init 78static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
79n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
80{ 79{
81 int irq; 80 int irq;
82 81
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index a8b291f00109..dae514c8276a 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
152 mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && 152 mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
153 (cx->mpu_logic_state == PWRDM_POWER_OFF); 153 (cx->mpu_logic_state == PWRDM_POWER_OFF);
154 154
155 /* Enter broadcast mode for periodic timers */
156 tick_broadcast_enable();
157
158 /* Enter broadcast mode for one-shot timers */
155 tick_broadcast_enter(); 159 tick_broadcast_enter();
156 160
157 /* 161 /*
@@ -218,15 +222,6 @@ fail:
218 return index; 222 return index;
219} 223}
220 224
221/*
222 * For each cpu, setup the broadcast timer because local timers
223 * stops for the states above C1.
224 */
225static void omap_setup_broadcast_timer(void *arg)
226{
227 tick_broadcast_enable();
228}
229
230static struct cpuidle_driver omap4_idle_driver = { 225static struct cpuidle_driver omap4_idle_driver = {
231 .name = "omap4_idle", 226 .name = "omap4_idle",
232 .owner = THIS_MODULE, 227 .owner = THIS_MODULE,
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void)
319 if (!cpu_clkdm[0] || !cpu_clkdm[1]) 314 if (!cpu_clkdm[0] || !cpu_clkdm[1])
320 return -ENODEV; 315 return -ENODEV;
321 316
322 /* Configure the broadcast timer on each cpu */
323 on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
324
325 return cpuidle_register(idle_driver, cpu_online_mask); 317 return cpuidle_register(idle_driver, cpu_online_mask);
326} 318}
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index f86b72d1d59e..1444b4b4bd9f 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
83 u32 enable_mask, enable_shift; 83 u32 enable_mask, enable_shift;
84 u32 pipd_mask, pipd_shift; 84 u32 pipd_mask, pipd_shift;
85 u32 reg; 85 u32 reg;
86 int ret;
86 87
87 if (dsi_id == 0) { 88 if (dsi_id == 0) {
88 enable_mask = OMAP4_DSI1_LANEENABLE_MASK; 89 enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
98 return -ENODEV; 99 return -ENODEV;
99 } 100 }
100 101
101 regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg); 102 ret = regmap_read(omap4_dsi_mux_syscon,
103 OMAP4_DSIPHY_SYSCON_OFFSET,
104 &reg);
105 if (ret)
106 return ret;
102 107
103 reg &= ~enable_mask; 108 reg &= ~enable_mask;
104 reg &= ~pipd_mask; 109 reg &= ~pipd_mask;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index fc5fb776a710..17558be4bf0a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -50,6 +50,9 @@
50#define OMAP4_NR_BANKS 4 50#define OMAP4_NR_BANKS 4
51#define OMAP4_NR_IRQS 128 51#define OMAP4_NR_IRQS 128
52 52
53#define SYS_NIRQ1_EXT_SYS_IRQ_1 7
54#define SYS_NIRQ2_EXT_SYS_IRQ_2 119
55
53static void __iomem *wakeupgen_base; 56static void __iomem *wakeupgen_base;
54static void __iomem *sar_base; 57static void __iomem *sar_base;
55static DEFINE_RAW_SPINLOCK(wakeupgen_lock); 58static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
153 irq_chip_unmask_parent(d); 156 irq_chip_unmask_parent(d);
154} 157}
155 158
159/*
160 * The sys_nirq pins bypass peripheral modules and are wired directly
161 * to MPUSS wakeupgen. They get automatically inverted for GIC.
162 */
163static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
164{
165 bool inverted = false;
166
167 switch (type) {
168 case IRQ_TYPE_LEVEL_LOW:
169 type &= ~IRQ_TYPE_LEVEL_MASK;
170 type |= IRQ_TYPE_LEVEL_HIGH;
171 inverted = true;
172 break;
173 case IRQ_TYPE_EDGE_FALLING:
174 type &= ~IRQ_TYPE_EDGE_BOTH;
175 type |= IRQ_TYPE_EDGE_RISING;
176 inverted = true;
177 break;
178 default:
179 break;
180 }
181
182 if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
183 d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
184 pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
185 d->hwirq);
186
187 return irq_chip_set_type_parent(d, type);
188}
189
156#ifdef CONFIG_HOTPLUG_CPU 190#ifdef CONFIG_HOTPLUG_CPU
157static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); 191static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
158 192
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
446 .irq_mask = wakeupgen_mask, 480 .irq_mask = wakeupgen_mask,
447 .irq_unmask = wakeupgen_unmask, 481 .irq_unmask = wakeupgen_unmask,
448 .irq_retrigger = irq_chip_retrigger_hierarchy, 482 .irq_retrigger = irq_chip_retrigger_hierarchy,
449 .irq_set_type = irq_chip_set_type_parent, 483 .irq_set_type = wakeupgen_irq_set_type,
450 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, 484 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
451#ifdef CONFIG_SMP 485#ifdef CONFIG_SMP
452 .irq_set_affinity = irq_chip_set_affinity_parent, 486 .irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index b5531dd3ae9c..3a04c73ac03c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1002,8 +1002,10 @@ static int _enable_clocks(struct omap_hwmod *oh)
1002 clk_enable(oh->_clk); 1002 clk_enable(oh->_clk);
1003 1003
1004 list_for_each_entry(os, &oh->slave_ports, node) { 1004 list_for_each_entry(os, &oh->slave_ports, node) {
1005 if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) 1005 if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
1006 omap2_clk_deny_idle(os->_clk);
1006 clk_enable(os->_clk); 1007 clk_enable(os->_clk);
1008 }
1007 } 1009 }
1008 1010
1009 /* The opt clocks are controlled by the device driver. */ 1011 /* The opt clocks are controlled by the device driver. */
@@ -1055,8 +1057,10 @@ static int _disable_clocks(struct omap_hwmod *oh)
1055 clk_disable(oh->_clk); 1057 clk_disable(oh->_clk);
1056 1058
1057 list_for_each_entry(os, &oh->slave_ports, node) { 1059 list_for_each_entry(os, &oh->slave_ports, node) {
1058 if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) 1060 if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
1059 clk_disable(os->_clk); 1061 clk_disable(os->_clk);
1062 omap2_clk_allow_idle(os->_clk);
1063 }
1060 } 1064 }
1061 1065
1062 if (oh->flags & HWMOD_OPT_CLKS_NEEDED) 1066 if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
@@ -2436,9 +2440,13 @@ static void _setup_iclk_autoidle(struct omap_hwmod *oh)
2436 continue; 2440 continue;
2437 2441
2438 if (os->flags & OCPIF_SWSUP_IDLE) { 2442 if (os->flags & OCPIF_SWSUP_IDLE) {
2439 /* XXX omap_iclk_deny_idle(c); */ 2443 /*
2444 * we might have multiple users of one iclk with
2445 * different requirements, disable autoidle when
2446 * the module is enabled, e.g. dss iclk
2447 */
2440 } else { 2448 } else {
2441 /* XXX omap_iclk_allow_idle(c); */ 2449 /* we are enabling autoidle afterwards anyways */
2442 clk_enable(os->_clk); 2450 clk_enable(os->_clk);
2443 } 2451 }
2444 } 2452 }
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index 5fb6f79059a8..afd98971d903 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -32,6 +32,8 @@ void __iomem *rst_manager_base_addr;
32void __iomem *sdr_ctl_base_addr; 32void __iomem *sdr_ctl_base_addr;
33unsigned long socfpga_cpu1start_addr; 33unsigned long socfpga_cpu1start_addr;
34 34
35extern void __init socfpga_reset_init(void);
36
35static void __init socfpga_sysmgr_init(void) 37static void __init socfpga_sysmgr_init(void)
36{ 38{
37 struct device_node *np; 39 struct device_node *np;
@@ -64,6 +66,7 @@ static void __init socfpga_init_irq(void)
64 66
65 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM)) 67 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
66 socfpga_init_ocram_ecc(); 68 socfpga_init_ocram_ecc();
69 socfpga_reset_init();
67} 70}
68 71
69static void __init socfpga_arria10_init_irq(void) 72static void __init socfpga_arria10_init_irq(void)
@@ -74,6 +77,7 @@ static void __init socfpga_arria10_init_irq(void)
74 socfpga_init_arria10_l2_ecc(); 77 socfpga_init_arria10_l2_ecc();
75 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM)) 78 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
76 socfpga_init_arria10_ocram_ecc(); 79 socfpga_init_arria10_ocram_ecc();
80 socfpga_reset_init();
77} 81}
78 82
79static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) 83static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
index 028e50c6383f..a32c3b631484 100644
--- a/arch/arm/mach-tango/pm.c
+++ b/arch/arm/mach-tango/pm.c
@@ -3,6 +3,7 @@
3#include <linux/suspend.h> 3#include <linux/suspend.h>
4#include <asm/suspend.h> 4#include <asm/suspend.h>
5#include "smc.h" 5#include "smc.h"
6#include "pm.h"
6 7
7static int tango_pm_powerdown(unsigned long arg) 8static int tango_pm_powerdown(unsigned long arg)
8{ 9{
@@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
24 .valid = suspend_valid_only_mem, 25 .valid = suspend_valid_only_mem,
25}; 26};
26 27
27static int __init tango_pm_init(void) 28void __init tango_pm_init(void)
28{ 29{
29 suspend_set_ops(&tango_pm_ops); 30 suspend_set_ops(&tango_pm_ops);
30 return 0;
31} 31}
32
33late_initcall(tango_pm_init);
diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
new file mode 100644
index 000000000000..35ea705a0ee2
--- /dev/null
+++ b/arch/arm/mach-tango/pm.h
@@ -0,0 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifdef CONFIG_SUSPEND
4void __init tango_pm_init(void);
5#else
6#define tango_pm_init NULL
7#endif
diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
index 677dd7b5efd9..824f90737b04 100644
--- a/arch/arm/mach-tango/setup.c
+++ b/arch/arm/mach-tango/setup.c
@@ -2,6 +2,7 @@
2#include <asm/mach/arch.h> 2#include <asm/mach/arch.h>
3#include <asm/hardware/cache-l2x0.h> 3#include <asm/hardware/cache-l2x0.h>
4#include "smc.h" 4#include "smc.h"
5#include "pm.h"
5 6
6static void tango_l2c_write(unsigned long val, unsigned int reg) 7static void tango_l2c_write(unsigned long val, unsigned int reg)
7{ 8{
@@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
15 .dt_compat = tango_dt_compat, 16 .dt_compat = tango_dt_compat,
16 .l2c_aux_mask = ~0, 17 .l2c_aux_mask = ~0,
17 .l2c_write_sec = tango_l2c_write, 18 .l2c_write_sec = tango_l2c_write,
19 .init_late = tango_pm_init,
18MACHINE_END 20MACHINE_END
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1e2922e447c..1e3e08a1c456 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev)
2390 return; 2390 return;
2391 2391
2392 arm_teardown_iommu_dma_ops(dev); 2392 arm_teardown_iommu_dma_ops(dev);
2393 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2394 set_dma_ops(dev, NULL);
2393} 2395}
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ed36dcab80f1..f51919974183 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
190 if (ssp == NULL) 190 if (ssp == NULL)
191 return -ENODEV; 191 return -ENODEV;
192 192
193 iounmap(ssp->mmio_base);
194
195 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 193 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
196 release_mem_region(res->start, resource_size(res)); 194 release_mem_region(res->start, resource_size(res));
197 195
@@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
201 list_del(&ssp->node); 199 list_del(&ssp->node);
202 mutex_unlock(&ssp_lock); 200 mutex_unlock(&ssp_lock);
203 201
204 kfree(ssp);
205 return 0; 202 return 0;
206} 203}
207 204
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 2c118a6ab358..0dc23fc227ed 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
247 } 247 }
248 248
249 /* Copy arch-dep-instance from template. */ 249 /* Copy arch-dep-instance from template. */
250 memcpy(code, (unsigned char *)optprobe_template_entry, 250 memcpy(code, (unsigned long *)&optprobe_template_entry,
251 TMPL_END_IDX * sizeof(kprobe_opcode_t)); 251 TMPL_END_IDX * sizeof(kprobe_opcode_t));
252 252
253 /* Adjust buffer according to instruction. */ 253 /* Adjust buffer according to instruction. */
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index cb44aa290e73..e1d44b903dfc 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -7,7 +7,6 @@
7#include <linux/of_address.h> 7#include <linux/of_address.h>
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/dma-mapping.h>
11#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
12#include <linux/swiotlb.h> 11#include <linux/swiotlb.h>
13 12
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
index b0c64f75792c..8974b5a1d3b1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
@@ -188,6 +188,7 @@
188 reg = <0x3a3>; 188 reg = <0x3a3>;
189 interrupt-parent = <&r_intc>; 189 interrupt-parent = <&r_intc>;
190 interrupts = <0 IRQ_TYPE_LEVEL_LOW>; 190 interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
191 x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */
191 }; 192 };
192}; 193};
193 194
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 837a03dee875..2abb335145a6 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -390,7 +390,7 @@
390 }; 390 };
391 391
392 video-codec@1c0e000 { 392 video-codec@1c0e000 {
393 compatible = "allwinner,sun50i-h5-video-engine"; 393 compatible = "allwinner,sun50i-a64-video-engine";
394 reg = <0x01c0e000 0x1000>; 394 reg = <0x01c0e000 0x1000>;
395 clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, 395 clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>,
396 <&ccu CLK_DRAM_VE>; 396 <&ccu CLK_DRAM_VE>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index e14e0ce7e89f..016641a41694 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -187,8 +187,7 @@
187 max-frequency = <100000000>; 187 max-frequency = <100000000>;
188 disable-wp; 188 disable-wp;
189 189
190 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 190 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
191 cd-inverted;
192 191
193 vmmc-supply = <&vddao_3v3>; 192 vmmc-supply = <&vddao_3v3>;
194 vqmmc-supply = <&vddio_boot>; 193 vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index 8cd50b75171d..ade2ee09ae96 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -305,8 +305,7 @@
305 max-frequency = <200000000>; 305 max-frequency = <200000000>;
306 disable-wp; 306 disable-wp;
307 307
308 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 308 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
309 cd-inverted;
310 309
311 vmmc-supply = <&vddio_ao3v3>; 310 vmmc-supply = <&vddio_ao3v3>;
312 vqmmc-supply = <&vddio_tf>; 311 vqmmc-supply = <&vddio_tf>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 4cf7f6e80c6a..25105ac96d55 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -238,8 +238,7 @@
238 max-frequency = <100000000>; 238 max-frequency = <100000000>;
239 disable-wp; 239 disable-wp;
240 240
241 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 241 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
242 cd-inverted;
243 242
244 vmmc-supply = <&vddao_3v3>; 243 vmmc-supply = <&vddao_3v3>;
245 vqmmc-supply = <&vddio_card>; 244 vqmmc-supply = <&vddio_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 2e1cd5e3a246..1cc9dc68ef00 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -258,8 +258,7 @@
258 max-frequency = <100000000>; 258 max-frequency = <100000000>;
259 disable-wp; 259 disable-wp;
260 260
261 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 261 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
262 cd-inverted;
263 262
264 vmmc-supply = <&tflash_vdd>; 263 vmmc-supply = <&tflash_vdd>;
265 vqmmc-supply = <&tf_io>; 264 vqmmc-supply = <&tf_io>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index ce862266b9aa..0be0f2a5d2fe 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -196,8 +196,7 @@
196 max-frequency = <100000000>; 196 max-frequency = <100000000>;
197 disable-wp; 197 disable-wp;
198 198
199 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 199 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
200 cd-inverted;
201 200
202 vmmc-supply = <&vddao_3v3>; 201 vmmc-supply = <&vddao_3v3>;
203 vqmmc-supply = <&vddio_card>; 202 vqmmc-supply = <&vddio_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 93a4acf2c46c..ad4d50bd9d77 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -154,8 +154,7 @@
154 max-frequency = <100000000>; 154 max-frequency = <100000000>;
155 disable-wp; 155 disable-wp;
156 156
157 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 157 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
158 cd-inverted;
159 158
160 vmmc-supply = <&vcc_3v3>; 159 vmmc-supply = <&vcc_3v3>;
161}; 160};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
index ec09bb5792b7..2d2db783c44c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
@@ -211,8 +211,7 @@
211 max-frequency = <100000000>; 211 max-frequency = <100000000>;
212 disable-wp; 212 disable-wp;
213 213
214 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 214 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
215 cd-inverted;
216 215
217 vmmc-supply = <&vddao_3v3>; 216 vmmc-supply = <&vddao_3v3>;
218 vqmmc-supply = <&vcc_3v3>; 217 vqmmc-supply = <&vcc_3v3>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
index f1c410e2da2b..796baea7a0bf 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
@@ -131,8 +131,7 @@
131 max-frequency = <100000000>; 131 max-frequency = <100000000>;
132 disable-wp; 132 disable-wp;
133 133
134 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 134 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
135 cd-inverted;
136 135
137 vmmc-supply = <&vddao_3v3>; 136 vmmc-supply = <&vddao_3v3>;
138 vqmmc-supply = <&vddio_card>; 137 vqmmc-supply = <&vddio_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index db293440e4ca..255cede7b447 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -238,8 +238,7 @@
238 max-frequency = <100000000>; 238 max-frequency = <100000000>;
239 disable-wp; 239 disable-wp;
240 240
241 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 241 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
242 cd-inverted;
243 242
244 vmmc-supply = <&vcc_3v3>; 243 vmmc-supply = <&vcc_3v3>;
245 vqmmc-supply = <&vcc_card>; 244 vqmmc-supply = <&vcc_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 6739697be1de..9cbdb85fb591 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -183,8 +183,7 @@
183 max-frequency = <100000000>; 183 max-frequency = <100000000>;
184 disable-wp; 184 disable-wp;
185 185
186 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 186 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
187 cd-inverted;
188 187
189 vmmc-supply = <&vddao_3v3>; 188 vmmc-supply = <&vddao_3v3>;
190 vqmmc-supply = <&vddio_card>; 189 vqmmc-supply = <&vddio_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index a1b31013ab6e..bc811a2faf42 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -137,8 +137,7 @@
137 max-frequency = <100000000>; 137 max-frequency = <100000000>;
138 disable-wp; 138 disable-wp;
139 139
140 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 140 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
141 cd-inverted;
142 141
143 vmmc-supply = <&vddao_3v3>; 142 vmmc-supply = <&vddao_3v3>;
144 vqmmc-supply = <&vddio_boot>; 143 vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index 3c3a667a8df8..3f086ed7de05 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -356,8 +356,7 @@
356 max-frequency = <100000000>; 356 max-frequency = <100000000>;
357 disable-wp; 357 disable-wp;
358 358
359 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 359 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
360 cd-inverted;
361 360
362 vmmc-supply = <&vddao_3v3>; 361 vmmc-supply = <&vddao_3v3>;
363 vqmmc-supply = <&vddio_boot>; 362 vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index f7a1cffab4a8..8acfd40090d2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -147,8 +147,7 @@
147 max-frequency = <100000000>; 147 max-frequency = <100000000>;
148 disable-wp; 148 disable-wp;
149 149
150 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 150 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
151 cd-inverted;
152 151
153 vmmc-supply = <&vddao_3v3>; 152 vmmc-supply = <&vddao_3v3>;
154 vqmmc-supply = <&vddio_boot>; 153 vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
index 7212dc4531e4..7fa20a8ede17 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
@@ -170,8 +170,7 @@
170 max-frequency = <100000000>; 170 max-frequency = <100000000>;
171 disable-wp; 171 disable-wp;
172 172
173 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 173 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
174 cd-inverted;
175 174
176 vmmc-supply = <&vddao_3v3>; 175 vmmc-supply = <&vddao_3v3>;
177 vqmmc-supply = <&vddio_boot>; 176 vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 64acccc4bfcb..f74b13aa5aa5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -227,34 +227,34 @@
227 227
228 pinctrl_usdhc1_100mhz: usdhc1-100grp { 228 pinctrl_usdhc1_100mhz: usdhc1-100grp {
229 fsl,pins = < 229 fsl,pins = <
230 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 230 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d
231 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 231 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd
232 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 232 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd
233 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 233 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd
234 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 234 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd
235 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 235 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd
236 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 236 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd
237 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 237 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd
238 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 238 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd
239 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 239 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd
240 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 240 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d
241 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 241 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
242 >; 242 >;
243 }; 243 };
244 244
245 pinctrl_usdhc1_200mhz: usdhc1-200grp { 245 pinctrl_usdhc1_200mhz: usdhc1-200grp {
246 fsl,pins = < 246 fsl,pins = <
247 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 247 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f
248 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 248 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf
249 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 249 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf
250 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 250 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf
251 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 251 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf
252 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 252 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf
253 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 253 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf
254 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 254 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf
255 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 255 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf
256 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 256 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf
257 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 257 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f
258 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 258 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
259 >; 259 >;
260 }; 260 };
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 8e9d6d5ed7b2..b6d31499fb43 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -360,6 +360,8 @@
360 <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, 360 <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
361 <&clk IMX8MQ_CLK_USDHC1_ROOT>; 361 <&clk IMX8MQ_CLK_USDHC1_ROOT>;
362 clock-names = "ipg", "ahb", "per"; 362 clock-names = "ipg", "ahb", "per";
363 assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>;
364 assigned-clock-rates = <400000000>;
363 fsl,tuning-start-tap = <20>; 365 fsl,tuning-start-tap = <20>;
364 fsl,tuning-step = <2>; 366 fsl,tuning-step = <2>;
365 bus-width = <4>; 367 bus-width = <4>;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index 5b4a9609e31f..2468762283a5 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -351,7 +351,7 @@
351 reg = <0>; 351 reg = <0>;
352 pinctrl-names = "default"; 352 pinctrl-names = "default";
353 pinctrl-0 = <&cp0_copper_eth_phy_reset>; 353 pinctrl-0 = <&cp0_copper_eth_phy_reset>;
354 reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>; 354 reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>;
355 reset-assert-us = <10000>; 355 reset-assert-us = <10000>;
356 }; 356 };
357 357
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
index 29ea7e81ec4c..329f8ceeebea 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
@@ -183,7 +183,7 @@
183 pinctrl-0 = <&cp0_pcie_pins>; 183 pinctrl-0 = <&cp0_pcie_pins>;
184 num-lanes = <4>; 184 num-lanes = <4>;
185 num-viewport = <8>; 185 num-viewport = <8>;
186 reset-gpio = <&cp0_gpio1 20 GPIO_ACTIVE_LOW>; 186 reset-gpios = <&cp0_gpio2 20 GPIO_ACTIVE_LOW>;
187 status = "okay"; 187 status = "okay";
188}; 188};
189 189
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 7d94c1fa592a..7f799cb5668e 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -28,6 +28,23 @@
28 method = "smc"; 28 method = "smc";
29 }; 29 };
30 30
31 reserved-memory {
32 #address-cells = <2>;
33 #size-cells = <2>;
34 ranges;
35
36 /*
37 * This area matches the mapping done with a
38 * mainline U-Boot, and should be updated by the
39 * bootloader.
40 */
41
42 psci-area@4000000 {
43 reg = <0x0 0x4000000 0x0 0x200000>;
44 no-map;
45 };
46 };
47
31 ap806 { 48 ap806 {
32 #address-cells = <2>; 49 #address-cells = <2>;
33 #size-cells = <2>; 50 #size-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 99b7495455a6..838e32cc14c9 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -404,7 +404,7 @@
404 }; 404 };
405 405
406 intc: interrupt-controller@9bc0000 { 406 intc: interrupt-controller@9bc0000 {
407 compatible = "arm,gic-v3"; 407 compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
408 #interrupt-cells = <3>; 408 #interrupt-cells = <3>;
409 interrupt-controller; 409 interrupt-controller;
410 #redistributor-regions = <1>; 410 #redistributor-regions = <1>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index 20745a8528c5..719ed9d9067d 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -1011,6 +1011,9 @@
1011 <&cpg CPG_CORE R8A774A1_CLK_S3D1>, 1011 <&cpg CPG_CORE R8A774A1_CLK_S3D1>,
1012 <&scif_clk>; 1012 <&scif_clk>;
1013 clock-names = "fck", "brg_int", "scif_clk"; 1013 clock-names = "fck", "brg_int", "scif_clk";
1014 dmas = <&dmac1 0x13>, <&dmac1 0x12>,
1015 <&dmac2 0x13>, <&dmac2 0x12>;
1016 dma-names = "tx", "rx", "tx", "rx";
1014 power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; 1017 power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
1015 resets = <&cpg 310>; 1018 resets = <&cpg 310>;
1016 status = "disabled"; 1019 status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index afedbf5728ec..0648d12778ed 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -1262,6 +1262,9 @@
1262 <&cpg CPG_CORE R8A7796_CLK_S3D1>, 1262 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
1263 <&scif_clk>; 1263 <&scif_clk>;
1264 clock-names = "fck", "brg_int", "scif_clk"; 1264 clock-names = "fck", "brg_int", "scif_clk";
1265 dmas = <&dmac1 0x13>, <&dmac1 0x12>,
1266 <&dmac2 0x13>, <&dmac2 0x12>;
1267 dma-names = "tx", "rx", "tx", "rx";
1265 power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; 1268 power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
1266 resets = <&cpg 310>; 1269 resets = <&cpg 310>;
1267 status = "disabled"; 1270 status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 6dc9b1fef830..4b3730f640ef 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -1068,6 +1068,9 @@
1068 <&cpg CPG_CORE R8A77965_CLK_S3D1>, 1068 <&cpg CPG_CORE R8A77965_CLK_S3D1>,
1069 <&scif_clk>; 1069 <&scif_clk>;
1070 clock-names = "fck", "brg_int", "scif_clk"; 1070 clock-names = "fck", "brg_int", "scif_clk";
1071 dmas = <&dmac1 0x13>, <&dmac1 0x12>,
1072 <&dmac2 0x13>, <&dmac2 0x12>;
1073 dma-names = "tx", "rx", "tx", "rx";
1071 power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; 1074 power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
1072 resets = <&cpg 310>; 1075 resets = <&cpg 310>;
1073 status = "disabled"; 1076 status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index bd937d68ca3b..040b36ef0dd2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -40,6 +40,7 @@
40 pinctrl-0 = <&usb30_host_drv>; 40 pinctrl-0 = <&usb30_host_drv>;
41 regulator-name = "vcc_host_5v"; 41 regulator-name = "vcc_host_5v";
42 regulator-always-on; 42 regulator-always-on;
43 regulator-boot-on;
43 vin-supply = <&vcc_sys>; 44 vin-supply = <&vcc_sys>;
44 }; 45 };
45 46
@@ -51,6 +52,7 @@
51 pinctrl-0 = <&usb20_host_drv>; 52 pinctrl-0 = <&usb20_host_drv>;
52 regulator-name = "vcc_host1_5v"; 53 regulator-name = "vcc_host1_5v";
53 regulator-always-on; 54 regulator-always-on;
55 regulator-boot-on;
54 vin-supply = <&vcc_sys>; 56 vin-supply = <&vcc_sys>;
55 }; 57 };
56 58
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index 1ee0dc0d9f10..d1cf404b8708 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -22,7 +22,7 @@
22 backlight = <&backlight>; 22 backlight = <&backlight>;
23 power-supply = <&pp3300_disp>; 23 power-supply = <&pp3300_disp>;
24 24
25 ports { 25 port {
26 panel_in_edp: endpoint { 26 panel_in_edp: endpoint {
27 remote-endpoint = <&edp_out_panel>; 27 remote-endpoint = <&edp_out_panel>;
28 }; 28 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
index 81e73103fa78..15e254a77391 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -43,7 +43,7 @@
43 backlight = <&backlight>; 43 backlight = <&backlight>;
44 power-supply = <&pp3300_disp>; 44 power-supply = <&pp3300_disp>;
45 45
46 ports { 46 port {
47 panel_in_edp: endpoint { 47 panel_in_edp: endpoint {
48 remote-endpoint = <&edp_out_panel>; 48 remote-endpoint = <&edp_out_panel>;
49 }; 49 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
index 0b8f1edbd746..b48a63c3efc3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -91,7 +91,7 @@
91 pinctrl-0 = <&lcd_panel_reset>; 91 pinctrl-0 = <&lcd_panel_reset>;
92 power-supply = <&vcc3v3_s0>; 92 power-supply = <&vcc3v3_s0>;
93 93
94 ports { 94 port {
95 panel_in_edp: endpoint { 95 panel_in_edp: endpoint {
96 remote-endpoint = <&edp_out_panel>; 96 remote-endpoint = <&edp_out_panel>;
97 }; 97 };
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 3ef443cfbab6..c8432e24207e 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -506,11 +506,15 @@ CONFIG_SND_SOC_ROCKCHIP=m
506CONFIG_SND_SOC_ROCKCHIP_SPDIF=m 506CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
507CONFIG_SND_SOC_ROCKCHIP_RT5645=m 507CONFIG_SND_SOC_ROCKCHIP_RT5645=m
508CONFIG_SND_SOC_RK3399_GRU_SOUND=m 508CONFIG_SND_SOC_RK3399_GRU_SOUND=m
509CONFIG_SND_MESON_AXG_SOUND_CARD=m
509CONFIG_SND_SOC_SAMSUNG=y 510CONFIG_SND_SOC_SAMSUNG=y
510CONFIG_SND_SOC_RCAR=m 511CONFIG_SND_SOC_RCAR=m
511CONFIG_SND_SOC_AK4613=m 512CONFIG_SND_SOC_AK4613=m
512CONFIG_SND_SIMPLE_CARD=m 513CONFIG_SND_SIMPLE_CARD=m
513CONFIG_SND_AUDIO_GRAPH_CARD=m 514CONFIG_SND_AUDIO_GRAPH_CARD=m
515CONFIG_SND_SOC_ES7134=m
516CONFIG_SND_SOC_ES7241=m
517CONFIG_SND_SOC_TAS571X=m
514CONFIG_I2C_HID=m 518CONFIG_I2C_HID=m
515CONFIG_USB=y 519CONFIG_USB=y
516CONFIG_USB_OTG=y 520CONFIG_USB_OTG=y
diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h
index 2173ad32d550..1c9a3a0c5fa5 100644
--- a/arch/arm64/include/asm/asm-prototypes.h
+++ b/arch/arm64/include/asm/asm-prototypes.h
@@ -2,7 +2,7 @@
2#ifndef __ASM_PROTOTYPES_H 2#ifndef __ASM_PROTOTYPES_H
3#define __ASM_PROTOTYPES_H 3#define __ASM_PROTOTYPES_H
4/* 4/*
5 * CONFIG_MODEVERIONS requires a C declaration to generate the appropriate CRC 5 * CONFIG_MODVERSIONS requires a C declaration to generate the appropriate CRC
6 * for each symbol. Since commit: 6 * for each symbol. Since commit:
7 * 7 *
8 * 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm") 8 * 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm")
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 13dd42c3ad4e..926434f413fa 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -58,6 +58,10 @@
58 */ 58 */
59#define ARCH_DMA_MINALIGN (128) 59#define ARCH_DMA_MINALIGN (128)
60 60
61#ifdef CONFIG_KASAN_SW_TAGS
62#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
63#endif
64
61#ifndef __ASSEMBLY__ 65#ifndef __ASSEMBLY__
62 66
63#include <linux/bitops.h> 67#include <linux/bitops.h>
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 3dd3d664c5c5..4658c937e173 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -20,9 +20,6 @@ struct dev_archdata {
20#ifdef CONFIG_IOMMU_API 20#ifdef CONFIG_IOMMU_API
21 void *iommu; /* private IOMMU data */ 21 void *iommu; /* private IOMMU data */
22#endif 22#endif
23#ifdef CONFIG_XEN
24 const struct dma_map_ops *dev_dma_ops;
25#endif
26}; 23};
27 24
28struct pdev_archdata { 25struct pdev_archdata {
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7732d0ba4e60..da3fc7324d68 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
48#define KVM_REQ_SLEEP \ 48#define KVM_REQ_SLEEP \
49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) 50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
51#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
51 52
52DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); 53DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
53 54
@@ -208,6 +209,13 @@ struct kvm_cpu_context {
208 209
209typedef struct kvm_cpu_context kvm_cpu_context_t; 210typedef struct kvm_cpu_context kvm_cpu_context_t;
210 211
212struct vcpu_reset_state {
213 unsigned long pc;
214 unsigned long r0;
215 bool be;
216 bool reset;
217};
218
211struct kvm_vcpu_arch { 219struct kvm_vcpu_arch {
212 struct kvm_cpu_context ctxt; 220 struct kvm_cpu_context ctxt;
213 221
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch {
297 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ 305 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
298 u64 vsesr_el2; 306 u64 vsesr_el2;
299 307
308 /* Additional reset state */
309 struct vcpu_reset_state reset_state;
310
300 /* True when deferrable sysregs are loaded on the physical CPU, 311 /* True when deferrable sysregs are loaded on the physical CPU,
301 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ 312 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
302 bool sysregs_loaded_on_cpu; 313 bool sysregs_loaded_on_cpu;
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index e1ec947e7c0c..0c656850eeea 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -332,6 +332,17 @@ static inline void *phys_to_virt(phys_addr_t x)
332#define virt_addr_valid(kaddr) \ 332#define virt_addr_valid(kaddr) \
333 (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) 333 (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
334 334
335/*
336 * Given that the GIC architecture permits ITS implementations that can only be
337 * configured with a LPI table address once, GICv3 systems with many CPUs may
338 * end up reserving a lot of different regions after a kexec for their LPI
339 * tables (one per CPU), as we are forced to reuse the same memory after kexec
340 * (and thus reserve it persistently with EFI beforehand)
341 */
342#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
343# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
344#endif
345
335#include <asm-generic/memory_model.h> 346#include <asm-generic/memory_model.h>
336 347
337#endif 348#endif
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 7689c7aa1d77..3e8063f4f9d3 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,8 @@
16#ifndef __ASM_MMU_H 16#ifndef __ASM_MMU_H
17#define __ASM_MMU_H 17#define __ASM_MMU_H
18 18
19#include <asm/cputype.h>
20
19#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ 21#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
20#define USER_ASID_BIT 48 22#define USER_ASID_BIT 48
21#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT) 23#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
@@ -44,6 +46,48 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
44 cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); 46 cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
45} 47}
46 48
49static inline bool arm64_kernel_use_ng_mappings(void)
50{
51 bool tx1_bug;
52
53 /* What's a kpti? Use global mappings if we don't know. */
54 if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
55 return false;
56
57 /*
58 * Note: this function is called before the CPU capabilities have
59 * been configured, so our early mappings will be global. If we
60 * later determine that kpti is required, then
61 * kpti_install_ng_mappings() will make them non-global.
62 */
63 if (arm64_kernel_unmapped_at_el0())
64 return true;
65
66 if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
67 return false;
68
69 /*
70 * KASLR is enabled so we're going to be enabling kpti on non-broken
71 * CPUs regardless of their susceptibility to Meltdown. Rather
72 * than force everybody to go through the G -> nG dance later on,
73 * just put down non-global mappings from the beginning.
74 */
75 if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
76 tx1_bug = false;
77#ifndef MODULE
78 } else if (!static_branch_likely(&arm64_const_caps_ready)) {
79 extern const struct midr_range cavium_erratum_27456_cpus[];
80
81 tx1_bug = is_midr_in_range_list(read_cpuid_id(),
82 cavium_erratum_27456_cpus);
83#endif
84 } else {
85 tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
86 }
87
88 return !tx1_bug && kaslr_offset() > 0;
89}
90
47typedef void (*bp_hardening_cb_t)(void); 91typedef void (*bp_hardening_cb_t)(void);
48 92
49struct bp_hardening_data { 93struct bp_hardening_data {
diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h
index 2ba6c6b9541f..71abfc7612b2 100644
--- a/arch/arm64/include/asm/neon-intrinsics.h
+++ b/arch/arm64/include/asm/neon-intrinsics.h
@@ -36,4 +36,8 @@
36#include <arm_neon.h> 36#include <arm_neon.h>
37#endif 37#endif
38 38
39#ifdef CONFIG_CC_IS_CLANG
40#pragma clang diagnostic ignored "-Wincompatible-pointer-types"
41#endif
42
39#endif /* __ASM_NEON_INTRINSICS_H */ 43#endif /* __ASM_NEON_INTRINSICS_H */
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 78b942c1bea4..986e41c4c32b 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -37,8 +37,8 @@
37#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) 37#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
38#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) 38#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
39 39
40#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) 40#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
41#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) 41#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
42 42
43#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) 43#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
44#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) 44#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index b3ef061d8b74..d88e56b90b93 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1 +1,77 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
3#define _ASM_ARM64_XEN_PAGE_COHERENT_H
4
5#include <linux/dma-mapping.h>
6#include <asm/page.h>
1#include <xen/arm/page-coherent.h> 7#include <xen/arm/page-coherent.h>
8
9static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
10 dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
11{
12 return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
13}
14
15static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
16 void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
17{
18 dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
19}
20
21static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
22 dma_addr_t handle, size_t size, enum dma_data_direction dir)
23{
24 unsigned long pfn = PFN_DOWN(handle);
25
26 if (pfn_valid(pfn))
27 dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
28 else
29 __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
30}
31
32static inline void xen_dma_sync_single_for_device(struct device *hwdev,
33 dma_addr_t handle, size_t size, enum dma_data_direction dir)
34{
35 unsigned long pfn = PFN_DOWN(handle);
36 if (pfn_valid(pfn))
37 dma_direct_sync_single_for_device(hwdev, handle, size, dir);
38 else
39 __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
40}
41
42static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
43 dma_addr_t dev_addr, unsigned long offset, size_t size,
44 enum dma_data_direction dir, unsigned long attrs)
45{
46 unsigned long page_pfn = page_to_xen_pfn(page);
47 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
48 unsigned long compound_pages =
49 (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
50 bool local = (page_pfn <= dev_pfn) &&
51 (dev_pfn - page_pfn < compound_pages);
52
53 if (local)
54 dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
55 else
56 __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
57}
58
59static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
60 size_t size, enum dma_data_direction dir, unsigned long attrs)
61{
62 unsigned long pfn = PFN_DOWN(handle);
63 /*
64 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
65 * multiple Xen page, it's not possible to have a mix of local and
66 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
67 * foreign mfn will always return false. If the page is local we can
68 * safely call the native dma_ops function, otherwise we call the xen
69 * specific function.
70 */
71 if (pfn_valid(pfn))
72 dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
73 else
74 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
75}
76
77#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 09ac548c9d44..9950bb0cbd52 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -553,7 +553,7 @@ static const struct midr_range arm64_repeat_tlbi_cpus[] = {
553#endif 553#endif
554 554
555#ifdef CONFIG_CAVIUM_ERRATUM_27456 555#ifdef CONFIG_CAVIUM_ERRATUM_27456
556static const struct midr_range cavium_erratum_27456_cpus[] = { 556const struct midr_range cavium_erratum_27456_cpus[] = {
557 /* Cavium ThunderX, T88 pass 1.x - 2.1 */ 557 /* Cavium ThunderX, T88 pass 1.x - 2.1 */
558 MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1), 558 MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
559 /* Cavium ThunderX, T81 pass 1.0 */ 559 /* Cavium ThunderX, T81 pass 1.0 */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4f272399de89..f6d84e2c92fe 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -983,7 +983,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
983 983
984 /* Useful for KASLR robustness */ 984 /* Useful for KASLR robustness */
985 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) 985 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
986 return true; 986 return kaslr_offset() > 0;
987 987
988 /* Don't force KPTI for CPUs that are not vulnerable */ 988 /* Don't force KPTI for CPUs that are not vulnerable */
989 if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list)) 989 if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
@@ -1003,7 +1003,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1003 static bool kpti_applied = false; 1003 static bool kpti_applied = false;
1004 int cpu = smp_processor_id(); 1004 int cpu = smp_processor_id();
1005 1005
1006 if (kpti_applied) 1006 /*
1007 * We don't need to rewrite the page-tables if either we've done
1008 * it already or we have KASLR enabled and therefore have not
1009 * created any global mappings at all.
1010 */
1011 if (kpti_applied || kaslr_offset() > 0)
1007 return; 1012 return;
1008 1013
1009 remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); 1014 remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c7213674cb24..eecf7927dab0 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -475,6 +475,7 @@ ENDPROC(__primary_switched)
475 475
476ENTRY(kimage_vaddr) 476ENTRY(kimage_vaddr)
477 .quad _text - TEXT_OFFSET 477 .quad _text - TEXT_OFFSET
478EXPORT_SYMBOL(kimage_vaddr)
478 479
479/* 480/*
480 * If we're fortunate enough to boot at EL2, ensure that the world is 481 * If we're fortunate enough to boot at EL2, ensure that the world is
@@ -538,8 +539,7 @@ set_hcr:
538 /* GICv3 system register access */ 539 /* GICv3 system register access */
539 mrs x0, id_aa64pfr0_el1 540 mrs x0, id_aa64pfr0_el1
540 ubfx x0, x0, #24, #4 541 ubfx x0, x0, #24, #4
541 cmp x0, #1 542 cbz x0, 3f
542 b.ne 3f
543 543
544 mrs_s x0, SYS_ICC_SRE_EL2 544 mrs_s x0, SYS_ICC_SRE_EL2
545 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 545 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 29cdc99688f3..9859e1178e6b 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
299 dcache_clean_range(__idmap_text_start, __idmap_text_end); 299 dcache_clean_range(__idmap_text_start, __idmap_text_end);
300 300
301 /* Clean kvm setup code to PoC? */ 301 /* Clean kvm setup code to PoC? */
302 if (el2_reset_needed()) 302 if (el2_reset_needed()) {
303 dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); 303 dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
304 dcache_clean_range(__hyp_text_start, __hyp_text_end);
305 }
304 306
305 /* make the crash dump kernel image protected again */ 307 /* make the crash dump kernel image protected again */
306 crash_post_resume(); 308 crash_post_resume();
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fbaa374..17f325ba831e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
28#include <asm/virt.h> 28#include <asm/virt.h>
29 29
30 .text 30 .text
31 .pushsection .hyp.text, "ax"
32
31 .align 11 33 .align 11
32 34
33ENTRY(__hyp_stub_vectors) 35ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8abe9c..b09b6f75f759 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17#include <asm/cacheflush.h>
17#include <asm/fixmap.h> 18#include <asm/fixmap.h>
18#include <asm/kernel-pgtable.h> 19#include <asm/kernel-pgtable.h>
19#include <asm/memory.h> 20#include <asm/memory.h>
@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
43 return ret; 44 return ret;
44} 45}
45 46
46static __init const u8 *get_cmdline(void *fdt) 47static __init const u8 *kaslr_get_cmdline(void *fdt)
47{ 48{
48 static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; 49 static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
49 50
@@ -87,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
87 * we end up running with module randomization disabled. 88 * we end up running with module randomization disabled.
88 */ 89 */
89 module_alloc_base = (u64)_etext - MODULES_VSIZE; 90 module_alloc_base = (u64)_etext - MODULES_VSIZE;
91 __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
90 92
91 /* 93 /*
92 * Try to map the FDT early. If this fails, we simply bail, 94 * Try to map the FDT early. If this fails, we simply bail,
@@ -109,7 +111,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
109 * Check if 'nokaslr' appears on the command line, and 111 * Check if 'nokaslr' appears on the command line, and
110 * return 0 if that is the case. 112 * return 0 if that is the case.
111 */ 113 */
112 cmdline = get_cmdline(fdt); 114 cmdline = kaslr_get_cmdline(fdt);
113 str = strstr(cmdline, "nokaslr"); 115 str = strstr(cmdline, "nokaslr");
114 if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) 116 if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
115 return 0; 117 return 0;
@@ -169,5 +171,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
169 module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; 171 module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
170 module_alloc_base &= PAGE_MASK; 172 module_alloc_base &= PAGE_MASK;
171 173
174 __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
175 __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
176
172 return offset; 177 return offset;
173} 178}
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 10e33860e47a..58871333737a 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -87,7 +87,9 @@ static int setup_dtb(struct kimage *image,
87 87
88 /* add kaslr-seed */ 88 /* add kaslr-seed */
89 ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED); 89 ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
90 if (ret && (ret != -FDT_ERR_NOTFOUND)) 90 if (ret == -FDT_ERR_NOTFOUND)
91 ret = 0;
92 else if (ret)
91 goto out; 93 goto out;
92 94
93 if (rng_is_initialized()) { 95 if (rng_is_initialized()) {
@@ -118,10 +120,12 @@ static int create_dtb(struct kimage *image,
118{ 120{
119 void *buf; 121 void *buf;
120 size_t buf_size; 122 size_t buf_size;
123 size_t cmdline_len;
121 int ret; 124 int ret;
122 125
126 cmdline_len = cmdline ? strlen(cmdline) : 0;
123 buf_size = fdt_totalsize(initial_boot_params) 127 buf_size = fdt_totalsize(initial_boot_params)
124 + strlen(cmdline) + DTB_EXTRA_SPACE; 128 + cmdline_len + DTB_EXTRA_SPACE;
125 129
126 for (;;) { 130 for (;;) {
127 buf = vmalloc(buf_size); 131 buf = vmalloc(buf_size);
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a5b338b2542..f17afb99890c 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
478 addr < (unsigned long)__entry_text_end) || 478 addr < (unsigned long)__entry_text_end) ||
479 (addr >= (unsigned long)__idmap_text_start && 479 (addr >= (unsigned long)__idmap_text_start &&
480 addr < (unsigned long)__idmap_text_end) || 480 addr < (unsigned long)__idmap_text_end) ||
481 (addr >= (unsigned long)__hyp_text_start &&
482 addr < (unsigned long)__hyp_text_end) ||
481 !!search_exception_tables(addr)) 483 !!search_exception_tables(addr))
482 return true; 484 return true;
483 485
484 if (!is_kernel_in_hyp_mode()) { 486 if (!is_kernel_in_hyp_mode()) {
485 if ((addr >= (unsigned long)__hyp_text_start && 487 if ((addr >= (unsigned long)__hyp_idmap_text_start &&
486 addr < (unsigned long)__hyp_text_end) ||
487 (addr >= (unsigned long)__hyp_idmap_text_start &&
488 addr < (unsigned long)__hyp_idmap_text_end)) 488 addr < (unsigned long)__hyp_idmap_text_end))
489 return true; 489 return true;
490 } 490 }
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 9dce33b0e260..ddaea0fd2fa4 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs)
1702} 1702}
1703 1703
1704/* 1704/*
1705 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a 1705 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
1706 * We also take into account DIT (bit 24), which is not yet documented, and 1706 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
1707 * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be 1707 * not described in ARM DDI 0487D.a.
1708 * allocated an EL0 meaning in future. 1708 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
1709 * be allocated an EL0 meaning in future.
1709 * Userspace cannot use these until they have an architectural meaning. 1710 * Userspace cannot use these until they have an architectural meaning.
1710 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. 1711 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
1711 * We also reserve IL for the kernel; SS is handled dynamically. 1712 * We also reserve IL for the kernel; SS is handled dynamically.
1712 */ 1713 */
1713#define SPSR_EL1_AARCH64_RES0_BITS \ 1714#define SPSR_EL1_AARCH64_RES0_BITS \
1714 (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ 1715 (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
1715 GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) 1716 GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
1716#define SPSR_EL1_AARCH32_RES0_BITS \ 1717#define SPSR_EL1_AARCH32_RES0_BITS \
1717 (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) 1718 (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
1718 1719
1719static int valid_compat_regs(struct user_pt_regs *regs) 1720static int valid_compat_regs(struct user_pt_regs *regs)
1720{ 1721{
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 4b0e1231625c..009849328289 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p)
313 arm64_memblock_init(); 313 arm64_memblock_init();
314 314
315 paging_init(); 315 paging_init();
316 efi_apply_persistent_mem_reservations();
317 316
318 acpi_table_upgrade(); 317 acpi_table_upgrade();
319 318
@@ -340,6 +339,9 @@ void __init setup_arch(char **cmdline_p)
340 smp_init_cpus(); 339 smp_init_cpus();
341 smp_build_mpidr_hash(); 340 smp_build_mpidr_hash();
342 341
342 /* Init percpu seeds for random tags after cpus are set up. */
343 kasan_init_tags();
344
343#ifdef CONFIG_ARM64_SW_TTBR0_PAN 345#ifdef CONFIG_ARM64_SW_TTBR0_PAN
344 /* 346 /*
345 * Make sure init_thread_info.ttbr0 always generates translation 347 * Make sure init_thread_info.ttbr0 always generates translation
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index b0b1478094b4..421ebf6f7086 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -23,6 +23,7 @@
23#include <kvm/arm_psci.h> 23#include <kvm/arm_psci.h>
24 24
25#include <asm/cpufeature.h> 25#include <asm/cpufeature.h>
26#include <asm/kprobes.h>
26#include <asm/kvm_asm.h> 27#include <asm/kvm_asm.h>
27#include <asm/kvm_emulate.h> 28#include <asm/kvm_emulate.h>
28#include <asm/kvm_host.h> 29#include <asm/kvm_host.h>
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
107 108
108 write_sysreg(kvm_get_hyp_vector(), vbar_el1); 109 write_sysreg(kvm_get_hyp_vector(), vbar_el1);
109} 110}
111NOKPROBE_SYMBOL(activate_traps_vhe);
110 112
111static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) 113static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
112{ 114{
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void)
154 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); 156 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
155 write_sysreg(vectors, vbar_el1); 157 write_sysreg(vectors, vbar_el1);
156} 158}
159NOKPROBE_SYMBOL(deactivate_traps_vhe);
157 160
158static void __hyp_text __deactivate_traps_nvhe(void) 161static void __hyp_text __deactivate_traps_nvhe(void)
159{ 162{
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
513 516
514 return exit_code; 517 return exit_code;
515} 518}
519NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
516 520
517/* Switch to the guest for legacy non-VHE systems */ 521/* Switch to the guest for legacy non-VHE systems */
518int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) 522int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
620 read_sysreg_el2(esr), read_sysreg_el2(far), 624 read_sysreg_el2(esr), read_sysreg_el2(far),
621 read_sysreg(hpfar_el2), par, vcpu); 625 read_sysreg(hpfar_el2), par, vcpu);
622} 626}
627NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
623 628
624void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) 629void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
625{ 630{
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 68d6f7c3b237..b426e2cf973c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -18,6 +18,7 @@
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19#include <linux/kvm_host.h> 19#include <linux/kvm_host.h>
20 20
21#include <asm/kprobes.h>
21#include <asm/kvm_asm.h> 22#include <asm/kvm_asm.h>
22#include <asm/kvm_emulate.h> 23#include <asm/kvm_emulate.h>
23#include <asm/kvm_hyp.h> 24#include <asm/kvm_hyp.h>
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
98{ 99{
99 __sysreg_save_common_state(ctxt); 100 __sysreg_save_common_state(ctxt);
100} 101}
102NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
101 103
102void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) 104void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
103{ 105{
104 __sysreg_save_common_state(ctxt); 106 __sysreg_save_common_state(ctxt);
105 __sysreg_save_el2_return_state(ctxt); 107 __sysreg_save_el2_return_state(ctxt);
106} 108}
109NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
107 110
108static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) 111static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
109{ 112{
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
188{ 191{
189 __sysreg_restore_common_state(ctxt); 192 __sysreg_restore_common_state(ctxt);
190} 193}
194NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
191 195
192void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) 196void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
193{ 197{
194 __sysreg_restore_common_state(ctxt); 198 __sysreg_restore_common_state(ctxt);
195 __sysreg_restore_el2_return_state(ctxt); 199 __sysreg_restore_el2_return_state(ctxt);
196} 200}
201NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
197 202
198void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) 203void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
199{ 204{
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index b72a3dd56204..f16a5f8ff2b4 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -32,6 +32,7 @@
32#include <asm/kvm_arm.h> 32#include <asm/kvm_arm.h>
33#include <asm/kvm_asm.h> 33#include <asm/kvm_asm.h>
34#include <asm/kvm_coproc.h> 34#include <asm/kvm_coproc.h>
35#include <asm/kvm_emulate.h>
35#include <asm/kvm_mmu.h> 36#include <asm/kvm_mmu.h>
36 37
37/* Maximum phys_shift supported for any VM on this host */ 38/* Maximum phys_shift supported for any VM on this host */
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
105 * This function finds the right table above and sets the registers on 106 * This function finds the right table above and sets the registers on
106 * the virtual CPU struct to their architecturally defined reset 107 * the virtual CPU struct to their architecturally defined reset
107 * values. 108 * values.
109 *
110 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
111 * ioctl or as part of handling a request issued by another VCPU in the PSCI
112 * handling code. In the first case, the VCPU will not be loaded, and in the
113 * second case the VCPU will be loaded. Because this function operates purely
114 * on the memory-backed valus of system registers, we want to do a full put if
115 * we were loaded (handling a request) and load the values back at the end of
116 * the function. Otherwise we leave the state alone. In both cases, we
117 * disable preemption around the vcpu reset as we would otherwise race with
118 * preempt notifiers which also call put/load.
108 */ 119 */
109int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 120int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
110{ 121{
111 const struct kvm_regs *cpu_reset; 122 const struct kvm_regs *cpu_reset;
123 int ret = -EINVAL;
124 bool loaded;
125
126 preempt_disable();
127 loaded = (vcpu->cpu != -1);
128 if (loaded)
129 kvm_arch_vcpu_put(vcpu);
112 130
113 switch (vcpu->arch.target) { 131 switch (vcpu->arch.target) {
114 default: 132 default:
115 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { 133 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
116 if (!cpu_has_32bit_el1()) 134 if (!cpu_has_32bit_el1())
117 return -EINVAL; 135 goto out;
118 cpu_reset = &default_regs_reset32; 136 cpu_reset = &default_regs_reset32;
119 } else { 137 } else {
120 cpu_reset = &default_regs_reset; 138 cpu_reset = &default_regs_reset;
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
129 /* Reset system registers */ 147 /* Reset system registers */
130 kvm_reset_sys_regs(vcpu); 148 kvm_reset_sys_regs(vcpu);
131 149
150 /*
151 * Additional reset state handling that PSCI may have imposed on us.
152 * Must be done after all the sys_reg reset.
153 */
154 if (vcpu->arch.reset_state.reset) {
155 unsigned long target_pc = vcpu->arch.reset_state.pc;
156
157 /* Gracefully handle Thumb2 entry point */
158 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
159 target_pc &= ~1UL;
160 vcpu_set_thumb(vcpu);
161 }
162
163 /* Propagate caller endianness */
164 if (vcpu->arch.reset_state.be)
165 kvm_vcpu_set_be(vcpu);
166
167 *vcpu_pc(vcpu) = target_pc;
168 vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
169
170 vcpu->arch.reset_state.reset = false;
171 }
172
132 /* Reset PMU */ 173 /* Reset PMU */
133 kvm_pmu_vcpu_reset(vcpu); 174 kvm_pmu_vcpu_reset(vcpu);
134 175
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
137 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; 178 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
138 179
139 /* Reset timer */ 180 /* Reset timer */
140 return kvm_timer_vcpu_reset(vcpu); 181 ret = kvm_timer_vcpu_reset(vcpu);
182out:
183 if (loaded)
184 kvm_arch_vcpu_load(vcpu, smp_processor_id());
185 preempt_enable();
186 return ret;
141} 187}
142 188
143void kvm_set_ipa_limit(void) 189void kvm_set_ipa_limit(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e3e37228ae4e..c936aa40c3f4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
314 return read_zero(vcpu, p); 314 return read_zero(vcpu, p);
315} 315}
316 316
317static bool trap_undef(struct kvm_vcpu *vcpu, 317/*
318 struct sys_reg_params *p, 318 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
319 const struct sys_reg_desc *r) 319 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
320 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
321 * treat it separately.
322 */
323static bool trap_loregion(struct kvm_vcpu *vcpu,
324 struct sys_reg_params *p,
325 const struct sys_reg_desc *r)
320{ 326{
321 kvm_inject_undefined(vcpu); 327 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
322 return false; 328 u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
329 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
330
331 if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
332 kvm_inject_undefined(vcpu);
333 return false;
334 }
335
336 if (p->is_write && sr == SYS_LORID_EL1)
337 return write_to_read_only(vcpu, p, r);
338
339 return trap_raz_wi(vcpu, p, r);
323} 340}
324 341
325static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, 342static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
1048 if (val & ptrauth_mask) 1065 if (val & ptrauth_mask)
1049 kvm_debug("ptrauth unsupported for guests, suppressing\n"); 1066 kvm_debug("ptrauth unsupported for guests, suppressing\n");
1050 val &= ~ptrauth_mask; 1067 val &= ~ptrauth_mask;
1051 } else if (id == SYS_ID_AA64MMFR1_EL1) {
1052 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
1053 kvm_debug("LORegions unsupported for guests, suppressing\n");
1054
1055 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
1056 } 1068 }
1057 1069
1058 return val; 1070 return val;
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
1338 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, 1350 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1339 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, 1351 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1340 1352
1341 { SYS_DESC(SYS_LORSA_EL1), trap_undef }, 1353 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1342 { SYS_DESC(SYS_LOREA_EL1), trap_undef }, 1354 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1343 { SYS_DESC(SYS_LORN_EL1), trap_undef }, 1355 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
1344 { SYS_DESC(SYS_LORC_EL1), trap_undef }, 1356 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
1345 { SYS_DESC(SYS_LORID_EL1), trap_undef }, 1357 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
1346 1358
1347 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, 1359 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1348 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, 1360 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2596 table = get_target_table(vcpu->arch.target, true, &num); 2608 table = get_target_table(vcpu->arch.target, true, &num);
2597 reset_sys_reg_descs(vcpu, table, num); 2609 reset_sys_reg_descs(vcpu, table, num);
2598 2610
2599 for (num = 1; num < NR_SYS_REGS; num++) 2611 for (num = 1; num < NR_SYS_REGS; num++) {
2600 if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) 2612 if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
2601 panic("Didn't reset __vcpu_sys_reg(%zi)", num); 2613 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
2614 break;
2615 }
2602} 2616}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index fb0908456a1f..78c0a72f822c 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -466,9 +466,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
466 __iommu_setup_dma_ops(dev, dma_base, size, iommu); 466 __iommu_setup_dma_ops(dev, dma_base, size, iommu);
467 467
468#ifdef CONFIG_XEN 468#ifdef CONFIG_XEN
469 if (xen_initial_domain()) { 469 if (xen_initial_domain())
470 dev->archdata.dev_dma_ops = dev->dma_ops;
471 dev->dma_ops = xen_dma_ops; 470 dev->dma_ops = xen_dma_ops;
472 }
473#endif 471#endif
474} 472}
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index fcb1f2a6d7c6..99bb8facb5cb 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
286 286
287} 287}
288 288
289static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start) 289static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
290 unsigned long end)
290{ 291{
291 pte_t *ptep = pte_offset_kernel(pmdp, 0UL); 292 unsigned long addr = start;
292 unsigned long addr; 293 pte_t *ptep = pte_offset_kernel(pmdp, start);
293 unsigned i;
294 294
295 for (i = 0; i < PTRS_PER_PTE; i++, ptep++) { 295 do {
296 addr = start + i * PAGE_SIZE;
297 note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); 296 note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
298 } 297 } while (ptep++, addr += PAGE_SIZE, addr != end);
299} 298}
300 299
301static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start) 300static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
301 unsigned long end)
302{ 302{
303 pmd_t *pmdp = pmd_offset(pudp, 0UL); 303 unsigned long next, addr = start;
304 unsigned long addr; 304 pmd_t *pmdp = pmd_offset(pudp, start);
305 unsigned i;
306 305
307 for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { 306 do {
308 pmd_t pmd = READ_ONCE(*pmdp); 307 pmd_t pmd = READ_ONCE(*pmdp);
308 next = pmd_addr_end(addr, end);
309 309
310 addr = start + i * PMD_SIZE;
311 if (pmd_none(pmd) || pmd_sect(pmd)) { 310 if (pmd_none(pmd) || pmd_sect(pmd)) {
312 note_page(st, addr, 3, pmd_val(pmd)); 311 note_page(st, addr, 3, pmd_val(pmd));
313 } else { 312 } else {
314 BUG_ON(pmd_bad(pmd)); 313 BUG_ON(pmd_bad(pmd));
315 walk_pte(st, pmdp, addr); 314 walk_pte(st, pmdp, addr, next);
316 } 315 }
317 } 316 } while (pmdp++, addr = next, addr != end);
318} 317}
319 318
320static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start) 319static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
320 unsigned long end)
321{ 321{
322 pud_t *pudp = pud_offset(pgdp, 0UL); 322 unsigned long next, addr = start;
323 unsigned long addr; 323 pud_t *pudp = pud_offset(pgdp, start);
324 unsigned i;
325 324
326 for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { 325 do {
327 pud_t pud = READ_ONCE(*pudp); 326 pud_t pud = READ_ONCE(*pudp);
327 next = pud_addr_end(addr, end);
328 328
329 addr = start + i * PUD_SIZE;
330 if (pud_none(pud) || pud_sect(pud)) { 329 if (pud_none(pud) || pud_sect(pud)) {
331 note_page(st, addr, 2, pud_val(pud)); 330 note_page(st, addr, 2, pud_val(pud));
332 } else { 331 } else {
333 BUG_ON(pud_bad(pud)); 332 BUG_ON(pud_bad(pud));
334 walk_pmd(st, pudp, addr); 333 walk_pmd(st, pudp, addr, next);
335 } 334 }
336 } 335 } while (pudp++, addr = next, addr != end);
337} 336}
338 337
339static void walk_pgd(struct pg_state *st, struct mm_struct *mm, 338static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
340 unsigned long start) 339 unsigned long start)
341{ 340{
342 pgd_t *pgdp = pgd_offset(mm, 0UL); 341 unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
343 unsigned i; 342 unsigned long next, addr = start;
344 unsigned long addr; 343 pgd_t *pgdp = pgd_offset(mm, start);
345 344
346 for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) { 345 do {
347 pgd_t pgd = READ_ONCE(*pgdp); 346 pgd_t pgd = READ_ONCE(*pgdp);
347 next = pgd_addr_end(addr, end);
348 348
349 addr = start + i * PGDIR_SIZE;
350 if (pgd_none(pgd)) { 349 if (pgd_none(pgd)) {
351 note_page(st, addr, 1, pgd_val(pgd)); 350 note_page(st, addr, 1, pgd_val(pgd));
352 } else { 351 } else {
353 BUG_ON(pgd_bad(pgd)); 352 BUG_ON(pgd_bad(pgd));
354 walk_pud(st, pgdp, addr); 353 walk_pud(st, pgdp, addr, next);
355 } 354 }
356 } 355 } while (pgdp++, addr = next, addr != end);
357} 356}
358 357
359void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) 358void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 30695a868107..5c9073bace83 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
33 __clean_dcache_area_pou(kaddr, len); 33 __clean_dcache_area_pou(kaddr, len);
34 __flush_icache_all(); 34 __flush_icache_all();
35 } else { 35 } else {
36 flush_icache_range(addr, addr + len); 36 /*
37 * Don't issue kick_all_cpus_sync() after I-cache invalidation
38 * for user mappings.
39 */
40 __flush_icache_range(addr, addr + len);
37 } 41 }
38} 42}
39 43
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 4b55b15707a3..f37a86d2a69d 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -252,8 +252,6 @@ void __init kasan_init(void)
252 memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); 252 memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
253 cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); 253 cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
254 254
255 kasan_init_tags();
256
257 /* At this point kasan is fully initialized. Enable error messages */ 255 /* At this point kasan is fully initialized. Enable error messages */
258 init_task.kasan_depth = 0; 256 init_task.kasan_depth = 0;
259 pr_info("KernelAddressSanitizer initialized\n"); 257 pr_info("KernelAddressSanitizer initialized\n");
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 33a2c94fed0d..63b4a1705182 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += pgalloc.h
30generic-y += preempt.h 30generic-y += preempt.h
31generic-y += segment.h 31generic-y += segment.h
32generic-y += serial.h 32generic-y += serial.h
33generic-y += shmparam.h
33generic-y += tlbflush.h 34generic-y += tlbflush.h
34generic-y += topology.h 35generic-y += topology.h
35generic-y += trace_clock.h 36generic-y += trace_clock.h
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index ecae6b358f95..c1dfa9c10e36 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -15,6 +15,31 @@ extern void iounmap(void *addr);
15extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr, 15extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
16 size_t size, unsigned long flags); 16 size_t size, unsigned long flags);
17 17
18/*
19 * I/O memory access primitives. Reads are ordered relative to any
20 * following Normal memory access. Writes are ordered relative to any prior
21 * Normal memory access.
22 *
23 * For CACHEV1 (807, 810), store instruction could fast retire, so we need
24 * another mb() to prevent st fast retire.
25 *
26 * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't
27 * fast retire.
28 */
29#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; })
30#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; })
31#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; })
32
33#ifdef CONFIG_CPU_HAS_CACHEV2
34#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); })
35#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); })
36#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); })
37#else
38#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); })
39#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); })
40#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
41#endif
42
18#define ioremap_nocache(phy, sz) ioremap(phy, sz) 43#define ioremap_nocache(phy, sz) ioremap(phy, sz)
19#define ioremap_wc ioremap_nocache 44#define ioremap_wc ioremap_nocache
20#define ioremap_wt ioremap_nocache 45#define ioremap_wt ioremap_nocache
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bf4f4a0e140e..d213bb47b717 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -24,41 +24,34 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
24 24
25extern void pgd_init(unsigned long *p); 25extern void pgd_init(unsigned long *p);
26 26
27static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 27static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
28 unsigned long address)
29{ 28{
30 pte_t *pte; 29 pte_t *pte;
31 unsigned long *kaddr, i; 30 unsigned long i;
32 31
33 pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, 32 pte = (pte_t *) __get_free_page(GFP_KERNEL);
34 PTE_ORDER); 33 if (!pte)
35 kaddr = (unsigned long *)pte; 34 return NULL;
36 if (address & 0x80000000) 35
37 for (i = 0; i < (PAGE_SIZE/4); i++) 36 for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
38 *(kaddr + i) = 0x1; 37 (pte + i)->pte_low = _PAGE_GLOBAL;
39 else
40 clear_page(kaddr);
41 38
42 return pte; 39 return pte;
43} 40}
44 41
45static inline struct page *pte_alloc_one(struct mm_struct *mm, 42static inline struct page *pte_alloc_one(struct mm_struct *mm)
46 unsigned long address)
47{ 43{
48 struct page *pte; 44 struct page *pte;
49 unsigned long *kaddr, i; 45
50 46 pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
51 pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER); 47 if (!pte)
52 if (pte) { 48 return NULL;
53 kaddr = kmap_atomic(pte); 49
54 if (address & 0x80000000) { 50 if (!pgtable_page_ctor(pte)) {
55 for (i = 0; i < (PAGE_SIZE/4); i++) 51 __free_page(pte);
56 *(kaddr + i) = 0x1; 52 return NULL;
57 } else
58 clear_page(kaddr);
59 kunmap_atomic(kaddr);
60 pgtable_page_ctor(pte);
61 } 53 }
54
62 return pte; 55 return pte;
63} 56}
64 57
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index edfcbb25fd9f..dcea277c09ae 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -45,8 +45,8 @@
45 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) 45 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
46#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) 46#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
47#define pte_clear(mm, addr, ptep) set_pte((ptep), \ 47#define pte_clear(mm, addr, ptep) set_pte((ptep), \
48 (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) 48 (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
49#define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) 49#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
50#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 50#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
51#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) 51#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
52#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ 52#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
241 241
242#define pgd_index(address) ((address) >> PGDIR_SHIFT) 242#define pgd_index(address) ((address) >> PGDIR_SHIFT)
243 243
244#define __HAVE_PHYS_MEM_ACCESS_PROT
245struct file;
246extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
247 unsigned long size, pgprot_t vma_prot);
248
244/* 249/*
245 * Macro to make mark a page protection value as "uncacheable". Note 250 * Macro to make mark a page protection value as "uncacheable". Note
246 * that "protection" is really a misnomer here as the protection value 251 * that "protection" is really a misnomer here as the protection value
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 8f454810514f..21e0bd5293dd 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -49,7 +49,7 @@ struct thread_struct {
49}; 49};
50 50
51#define INIT_THREAD { \ 51#define INIT_THREAD { \
52 .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \ 52 .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
53 .sr = DEFAULT_PSR_VALUE, \ 53 .sr = DEFAULT_PSR_VALUE, \
54} 54}
55 55
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p);
95#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) 95#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp)
96 96
97#define task_pt_regs(p) \ 97#define task_pt_regs(p) \
98 ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) 98 ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
99 99
100#define cpu_relax() barrier() 100#define cpu_relax() barrier()
101 101
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c
index 659253e9989c..d67f9777cfd9 100644
--- a/arch/csky/kernel/dumpstack.c
+++ b/arch/csky/kernel/dumpstack.c
@@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
38 if (task) 38 if (task)
39 stack = (unsigned long *)thread_saved_fp(task); 39 stack = (unsigned long *)thread_saved_fp(task);
40 else 40 else
41#ifdef CONFIG_STACKTRACE
42 asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
43#else
41 stack = (unsigned long *)&stack; 44 stack = (unsigned long *)&stack;
45#endif
42 } 46 }
43 47
44 show_trace(stack); 48 show_trace(stack);
diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c
index 65abab0c7a47..b5ad7d9de18c 100644
--- a/arch/csky/kernel/module.c
+++ b/arch/csky/kernel/module.c
@@ -12,7 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <asm/pgtable.h> 13#include <asm/pgtable.h>
14 14
15#if defined(__CSKYABIV2__) 15#ifdef CONFIG_CPU_CK810
16#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000) 16#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000)
17#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0) 17#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0)
18 18
@@ -25,6 +25,26 @@
25 *(uint16_t *)(addr) = 0xE8Fa; \ 25 *(uint16_t *)(addr) = 0xE8Fa; \
26 *((uint16_t *)(addr) + 1) = 0x0000; \ 26 *((uint16_t *)(addr) + 1) = 0x0000; \
27} while (0) 27} while (0)
28
29static void jsri_2_lrw_jsr(uint32_t *location)
30{
31 uint16_t *location_tmp = (uint16_t *)location;
32
33 if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
34 return;
35
36 if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
37 /* jsri 0x... --> lrw r26, 0x... */
38 CHANGE_JSRI_TO_LRW(location);
39 /* lsli r0, r0 --> jsr r26 */
40 SET_JSR32_R26(location + 1);
41 }
42}
43#else
44static void inline jsri_2_lrw_jsr(uint32_t *location)
45{
46 return;
47}
28#endif 48#endif
29 49
30int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, 50int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
@@ -35,9 +55,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
35 Elf32_Sym *sym; 55 Elf32_Sym *sym;
36 uint32_t *location; 56 uint32_t *location;
37 short *temp; 57 short *temp;
38#if defined(__CSKYABIV2__)
39 uint16_t *location_tmp;
40#endif
41 58
42 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 59 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
43 /* This is where to make the change */ 60 /* This is where to make the change */
@@ -59,18 +76,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
59 case R_CSKY_PCRELJSR_IMM11BY2: 76 case R_CSKY_PCRELJSR_IMM11BY2:
60 break; 77 break;
61 case R_CSKY_PCRELJSR_IMM26BY2: 78 case R_CSKY_PCRELJSR_IMM26BY2:
62#if defined(__CSKYABIV2__) 79 jsri_2_lrw_jsr(location);
63 location_tmp = (uint16_t *)location;
64 if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
65 break;
66
67 if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
68 /* jsri 0x... --> lrw r26, 0x... */
69 CHANGE_JSRI_TO_LRW(location);
70 /* lsli r0, r0 --> jsr r26 */
71 SET_JSR32_R26(location + 1);
72 }
73#endif
74 break; 80 break;
75 case R_CSKY_ADDR_HI16: 81 case R_CSKY_ADDR_HI16:
76 temp = ((short *)location) + 1; 82 temp = ((short *)location) + 1;
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 57f1afe19a52..f2f12fff36f7 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -8,6 +8,7 @@
8#include <linux/ptrace.h> 8#include <linux/ptrace.h>
9#include <linux/regset.h> 9#include <linux/regset.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/sched/task_stack.h>
11#include <linux/signal.h> 12#include <linux/signal.h>
12#include <linux/smp.h> 13#include <linux/smp.h>
13#include <linux/uaccess.h> 14#include <linux/uaccess.h>
@@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target,
159static const struct user_regset csky_regsets[] = { 160static const struct user_regset csky_regsets[] = {
160 [REGSET_GPR] = { 161 [REGSET_GPR] = {
161 .core_note_type = NT_PRSTATUS, 162 .core_note_type = NT_PRSTATUS,
162 .n = ELF_NGREG, 163 .n = sizeof(struct pt_regs) / sizeof(u32),
163 .size = sizeof(u32), 164 .size = sizeof(u32),
164 .align = sizeof(u32), 165 .align = sizeof(u32),
165 .get = &gpr_get, 166 .get = &gpr_get,
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index ddc4dd79f282..b07a534b3062 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
160{ 160{
161 unsigned long mask = 1 << cpu; 161 unsigned long mask = 1 << cpu;
162 162
163 secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; 163 secondary_stack =
164 (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
164 secondary_hint = mfcr("cr31"); 165 secondary_hint = mfcr("cr31");
165 secondary_ccr = mfcr("cr18"); 166 secondary_ccr = mfcr("cr18");
166 167
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
index cb7c03e5cd21..8473b6bdf512 100644
--- a/arch/csky/mm/ioremap.c
+++ b/arch/csky/mm/ioremap.c
@@ -46,3 +46,17 @@ void iounmap(void __iomem *addr)
46 vunmap((void *)((unsigned long)addr & PAGE_MASK)); 46 vunmap((void *)((unsigned long)addr & PAGE_MASK));
47} 47}
48EXPORT_SYMBOL(iounmap); 48EXPORT_SYMBOL(iounmap);
49
50pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
51 unsigned long size, pgprot_t vma_prot)
52{
53 if (!pfn_valid(pfn)) {
54 vma_prot.pgprot |= _PAGE_SO;
55 return pgprot_noncached(vma_prot);
56 } else if (file->f_flags & O_SYNC) {
57 return pgprot_noncached(vma_prot);
58 }
59
60 return vma_prot;
61}
62EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index 4003ddc616e1..f801f3708a89 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/
37 37
38boot := arch/h8300/boot 38boot := arch/h8300/boot
39 39
40archmrproper:
41
42archclean: 40archclean:
43 $(Q)$(MAKE) $(clean)=$(boot) 41 $(Q)$(MAKE) $(clean)=$(boot)
44 42
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index cd400d353d18..961c1dc064e1 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -40,6 +40,7 @@ generic-y += preempt.h
40generic-y += scatterlist.h 40generic-y += scatterlist.h
41generic-y += sections.h 41generic-y += sections.h
42generic-y += serial.h 42generic-y += serial.h
43generic-y += shmparam.h
43generic-y += sizes.h 44generic-y += sizes.h
44generic-y += spinlock.h 45generic-y += spinlock.h
45generic-y += timex.h 46generic-y += timex.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 47c4da3d64a4..b25fd42aa0f4 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += rwsem.h
30generic-y += sections.h 30generic-y += sections.h
31generic-y += segment.h 31generic-y += segment.h
32generic-y += serial.h 32generic-y += serial.h
33generic-y += shmparam.h
33generic-y += sizes.h 34generic-y += sizes.h
34generic-y += topology.h 35generic-y += topology.h
35generic-y += trace_clock.h 36generic-y += trace_clock.h
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 320d86f192ee..171290f9f1de 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig
16NM := $(CROSS_COMPILE)nm -B 16NM := $(CROSS_COMPILE)nm -B
17READELF := $(CROSS_COMPILE)readelf 17READELF := $(CROSS_COMPILE)readelf
18 18
19export AWK
20
21CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ 19CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
22 20
23OBJCOPYFLAGS := --strip-all 21OBJCOPYFLAGS := --strip-all
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 38049357d6d3..40712e49381b 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -155,18 +155,22 @@ out:
155static int __init nfhd_init(void) 155static int __init nfhd_init(void)
156{ 156{
157 u32 blocks, bsize; 157 u32 blocks, bsize;
158 int ret;
158 int i; 159 int i;
159 160
160 nfhd_id = nf_get_id("XHDI"); 161 nfhd_id = nf_get_id("XHDI");
161 if (!nfhd_id) 162 if (!nfhd_id)
162 return -ENODEV; 163 return -ENODEV;
163 164
164 major_num = register_blkdev(major_num, "nfhd"); 165 ret = register_blkdev(major_num, "nfhd");
165 if (major_num <= 0) { 166 if (ret < 0) {
166 pr_warn("nfhd: unable to get major number\n"); 167 pr_warn("nfhd: unable to get major number\n");
167 return major_num; 168 return ret;
168 } 169 }
169 170
171 if (!major_num)
172 major_num = ret;
173
170 for (i = NFHD_DEV_OFFSET; i < 24; i++) { 174 for (i = NFHD_DEV_OFFSET; i < 24; i++) {
171 if (nfhd_get_capacity(i, 0, &blocks, &bsize)) 175 if (nfhd_get_capacity(i, 0, &blocks, &bsize))
172 continue; 176 continue;
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 9f1dd26903e3..95f8f631c4df 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -20,6 +20,7 @@ generic-y += mm-arch-hooks.h
20generic-y += percpu.h 20generic-y += percpu.h
21generic-y += preempt.h 21generic-y += preempt.h
22generic-y += sections.h 22generic-y += sections.h
23generic-y += shmparam.h
23generic-y += spinlock.h 24generic-y += spinlock.h
24generic-y += topology.h 25generic-y += topology.h
25generic-y += trace_clock.h 26generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 9c7d1d25bf3d..791cc8d54d0a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += parport.h
26generic-y += percpu.h 26generic-y += percpu.h
27generic-y += preempt.h 27generic-y += preempt.h
28generic-y += serial.h 28generic-y += serial.h
29generic-y += shmparam.h
29generic-y += syscalls.h 30generic-y += syscalls.h
30generic-y += topology.h 31generic-y += topology.h
31generic-y += trace_clock.h 32generic-y += trace_clock.h
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 787290781b8c..a84c24d894aa 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1403,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT
1403 please say 'N' here. If you want a high-performance kernel to run on 1403 please say 'N' here. If you want a high-performance kernel to run on
1404 new Loongson 3 machines only, please say 'Y' here. 1404 new Loongson 3 machines only, please say 'Y' here.
1405 1405
1406config CPU_LOONGSON3_WORKAROUNDS
1407 bool "Old Loongson 3 LLSC Workarounds"
1408 default y if SMP
1409 depends on CPU_LOONGSON3
1410 help
1411 Loongson 3 processors have the llsc issues which require workarounds.
1412 Without workarounds the system may hang unexpectedly.
1413
1414 Newer Loongson 3 will fix these issues and no workarounds are needed.
1415 The workarounds have no significant side effect on them but may
1416 decrease the performance of the system so this option should be
1417 disabled unless the kernel is intended to be run on old systems.
1418
1419 If unsure, please say Y.
1420
1406config CPU_LOONGSON2E 1421config CPU_LOONGSON2E
1407 bool "Loongson 2E" 1422 bool "Loongson 2E"
1408 depends on SYS_HAS_CPU_LOONGSON2E 1423 depends on SYS_HAS_CPU_LOONGSON2E
@@ -3155,6 +3170,7 @@ config MIPS32_O32
3155config MIPS32_N32 3170config MIPS32_N32
3156 bool "Kernel support for n32 binaries" 3171 bool "Kernel support for n32 binaries"
3157 depends on 64BIT 3172 depends on 64BIT
3173 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
3158 select COMPAT 3174 select COMPAT
3159 select MIPS32_COMPAT 3175 select MIPS32_COMPAT
3160 select SYSVIPC_COMPAT if SYSVIPC 3176 select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49e608e..fe3773539eff 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
173 pm_power_off = bcm47xx_machine_halt; 173 pm_power_off = bcm47xx_machine_halt;
174} 174}
175 175
176#ifdef CONFIG_BCM47XX_BCMA
177static struct device * __init bcm47xx_setup_device(void)
178{
179 struct device *dev;
180 int err;
181
182 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
183 if (!dev)
184 return NULL;
185
186 err = dev_set_name(dev, "bcm47xx_soc");
187 if (err) {
188 pr_err("Failed to set SoC device name: %d\n", err);
189 kfree(dev);
190 return NULL;
191 }
192
193 err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
194 if (err)
195 pr_err("Failed to set SoC DMA mask: %d\n", err);
196
197 return dev;
198}
199#endif
200
176/* 201/*
177 * This finishes bus initialization doing things that were not possible without 202 * This finishes bus initialization doing things that were not possible without
178 * kmalloc. Make sure to call it late enough (after mm_init). 203 * kmalloc. Make sure to call it late enough (after mm_init).
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
183 if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { 208 if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
184 int err; 209 int err;
185 210
211 bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
212 if (!bcm47xx_bus.bcma.dev)
213 panic("Failed to setup SoC device\n");
214
186 err = bcma_host_soc_init(&bcm47xx_bus.bcma); 215 err = bcma_host_soc_init(&bcm47xx_bus.bcma);
187 if (err) 216 if (err)
188 panic("Failed to initialize BCMA bus (err %d)", err); 217 panic("Failed to initialize BCMA bus (err %d)", err);
@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
235#endif 264#endif
236#ifdef CONFIG_BCM47XX_BCMA 265#ifdef CONFIG_BCM47XX_BCMA
237 case BCM47XX_BUS_TYPE_BCMA: 266 case BCM47XX_BUS_TYPE_BCMA:
267 if (device_register(bcm47xx_bus.bcma.dev))
268 pr_err("Failed to register SoC device\n");
238 bcma_bus_register(&bcm47xx_bus.bcma.bus); 269 bcma_bus_register(&bcm47xx_bus.bcma.bus);
239 break; 270 break;
240#endif 271#endif
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 50cff3cbcc6d..4f7b1fa31cf5 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -76,7 +76,7 @@
76 status = "okay"; 76 status = "okay";
77 77
78 pinctrl-names = "default"; 78 pinctrl-names = "default";
79 pinctrl-0 = <&pins_uart2>; 79 pinctrl-0 = <&pins_uart3>;
80}; 80};
81 81
82&uart4 { 82&uart4 {
@@ -196,9 +196,9 @@
196 bias-disable; 196 bias-disable;
197 }; 197 };
198 198
199 pins_uart2: uart2 { 199 pins_uart3: uart3 {
200 function = "uart2"; 200 function = "uart3";
201 groups = "uart2-data", "uart2-hwflow"; 201 groups = "uart3-data", "uart3-hwflow";
202 bias-disable; 202 bias-disable;
203 }; 203 };
204 204
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 6fb16fd24035..2beb78a62b7d 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -161,7 +161,7 @@
161 #dma-cells = <2>; 161 #dma-cells = <2>;
162 162
163 interrupt-parent = <&intc>; 163 interrupt-parent = <&intc>;
164 interrupts = <29>; 164 interrupts = <20>;
165 165
166 clocks = <&cgu JZ4740_CLK_DMA>; 166 clocks = <&cgu JZ4740_CLK_DMA>;
167 167
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
index 2152b7ba65fb..cc8dbea0911f 100644
--- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
+++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
@@ -90,11 +90,11 @@
90 interrupts = <0>; 90 interrupts = <0>;
91 }; 91 };
92 92
93 axi_i2c: i2c@10A00000 { 93 axi_i2c: i2c@10a00000 {
94 compatible = "xlnx,xps-iic-2.00.a"; 94 compatible = "xlnx,xps-iic-2.00.a";
95 interrupt-parent = <&axi_intc>; 95 interrupt-parent = <&axi_intc>;
96 interrupts = <4>; 96 interrupts = <4>;
97 reg = < 0x10A00000 0x10000 >; 97 reg = < 0x10a00000 0x10000 >;
98 clocks = <&ext>; 98 clocks = <&ext>;
99 xlnx,clk-freq = <0x5f5e100>; 99 xlnx,clk-freq = <0x5f5e100>;
100 xlnx,family = "Artix7"; 100 xlnx,family = "Artix7";
@@ -106,9 +106,9 @@
106 #address-cells = <1>; 106 #address-cells = <1>;
107 #size-cells = <0>; 107 #size-cells = <0>;
108 108
109 ad7420@4B { 109 ad7420@4b {
110 compatible = "adi,adt7420"; 110 compatible = "adi,adt7420";
111 reg = <0x4B>; 111 reg = <0x4b>;
112 }; 112 };
113 } ; 113 } ;
114}; 114};
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 2c79ab52977a..8bf43c5a7bc7 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored)
98 " sync \n" 98 " sync \n"
99 " synci ($0) \n"); 99 " synci ($0) \n");
100 100
101 relocated_kexec_smp_wait(NULL); 101 kexec_reboot();
102} 102}
103#endif 103#endif
104 104
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 4e4ec779f182..6f981af67826 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
66# CONFIG_SERIAL_8250_PCI is not set 66# CONFIG_SERIAL_8250_PCI is not set
67CONFIG_SERIAL_8250_NR_UARTS=1 67CONFIG_SERIAL_8250_NR_UARTS=1
68CONFIG_SERIAL_8250_RUNTIME_UARTS=1 68CONFIG_SERIAL_8250_RUNTIME_UARTS=1
69CONFIG_SERIAL_OF_PLATFORM=y
69CONFIG_SERIAL_AR933X=y 70CONFIG_SERIAL_AR933X=y
70CONFIG_SERIAL_AR933X_CONSOLE=y 71CONFIG_SERIAL_AR933X_CONSOLE=y
71# CONFIG_HW_RANDOM is not set 72# CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 43fcd35e2957..94096299fc56 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
58 if (kernel_uses_llsc) { \ 58 if (kernel_uses_llsc) { \
59 int temp; \ 59 int temp; \
60 \ 60 \
61 loongson_llsc_mb(); \
61 __asm__ __volatile__( \ 62 __asm__ __volatile__( \
62 " .set push \n" \ 63 " .set push \n" \
63 " .set "MIPS_ISA_LEVEL" \n" \ 64 " .set "MIPS_ISA_LEVEL" \n" \
@@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
85 if (kernel_uses_llsc) { \ 86 if (kernel_uses_llsc) { \
86 int temp; \ 87 int temp; \
87 \ 88 \
89 loongson_llsc_mb(); \
88 __asm__ __volatile__( \ 90 __asm__ __volatile__( \
89 " .set push \n" \ 91 " .set push \n" \
90 " .set "MIPS_ISA_LEVEL" \n" \ 92 " .set "MIPS_ISA_LEVEL" \n" \
@@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
118 if (kernel_uses_llsc) { \ 120 if (kernel_uses_llsc) { \
119 int temp; \ 121 int temp; \
120 \ 122 \
123 loongson_llsc_mb(); \
121 __asm__ __volatile__( \ 124 __asm__ __volatile__( \
122 " .set push \n" \ 125 " .set push \n" \
123 " .set "MIPS_ISA_LEVEL" \n" \ 126 " .set "MIPS_ISA_LEVEL" \n" \
@@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
256 if (kernel_uses_llsc) { \ 259 if (kernel_uses_llsc) { \
257 long temp; \ 260 long temp; \
258 \ 261 \
262 loongson_llsc_mb(); \
259 __asm__ __volatile__( \ 263 __asm__ __volatile__( \
260 " .set push \n" \ 264 " .set push \n" \
261 " .set "MIPS_ISA_LEVEL" \n" \ 265 " .set "MIPS_ISA_LEVEL" \n" \
@@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
283 if (kernel_uses_llsc) { \ 287 if (kernel_uses_llsc) { \
284 long temp; \ 288 long temp; \
285 \ 289 \
290 loongson_llsc_mb(); \
286 __asm__ __volatile__( \ 291 __asm__ __volatile__( \
287 " .set push \n" \ 292 " .set push \n" \
288 " .set "MIPS_ISA_LEVEL" \n" \ 293 " .set "MIPS_ISA_LEVEL" \n" \
@@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
316 if (kernel_uses_llsc) { \ 321 if (kernel_uses_llsc) { \
317 long temp; \ 322 long temp; \
318 \ 323 \
324 loongson_llsc_mb(); \
319 __asm__ __volatile__( \ 325 __asm__ __volatile__( \
320 " .set push \n" \ 326 " .set push \n" \
321 " .set "MIPS_ISA_LEVEL" \n" \ 327 " .set "MIPS_ISA_LEVEL" \n" \
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index a5eb1bb199a7..b7f6ac5e513c 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -222,6 +222,42 @@
222#define __smp_mb__before_atomic() __smp_mb__before_llsc() 222#define __smp_mb__before_atomic() __smp_mb__before_llsc()
223#define __smp_mb__after_atomic() smp_llsc_mb() 223#define __smp_mb__after_atomic() smp_llsc_mb()
224 224
225/*
226 * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
227 * store or pref) in between an ll & sc can cause the sc instruction to
228 * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
229 * containing such sequences, this bug bites harder than we might otherwise
230 * expect due to reordering & speculation:
231 *
232 * 1) A memory access appearing prior to the ll in program order may actually
233 * be executed after the ll - this is the reordering case.
234 *
235 * In order to avoid this we need to place a memory barrier (ie. a sync
236 * instruction) prior to every ll instruction, in between it & any earlier
237 * memory access instructions. Many of these cases are already covered by
238 * smp_mb__before_llsc() but for the remaining cases, typically ones in
239 * which multiple CPUs may operate on a memory location but ordering is not
240 * usually guaranteed, we use loongson_llsc_mb() below.
241 *
242 * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
243 *
244 * 2) If a conditional branch exists between an ll & sc with a target outside
245 * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
246 * or similar, then misprediction of the branch may allow speculative
247 * execution of memory accesses from outside of the ll-sc loop.
248 *
249 * In order to avoid this we need a memory barrier (ie. a sync instruction)
250 * at each affected branch target, for which we also use loongson_llsc_mb()
251 * defined below.
252 *
253 * This case affects all current Loongson 3 CPUs.
254 */
255#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
256#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
257#else
258#define loongson_llsc_mb() do { } while (0)
259#endif
260
225#include <asm-generic/barrier.h> 261#include <asm-generic/barrier.h>
226 262
227#endif /* __ASM_BARRIER_H */ 263#endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index c4675957b21b..830c93a010c3 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
69 : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); 69 : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
70#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 70#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
71 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 71 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
72 loongson_llsc_mb();
72 do { 73 do {
73 __asm__ __volatile__( 74 __asm__ __volatile__(
74 " " __LL "%0, %1 # set_bit \n" 75 " " __LL "%0, %1 # set_bit \n"
@@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
79 } while (unlikely(!temp)); 80 } while (unlikely(!temp));
80#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 81#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
81 } else if (kernel_uses_llsc) { 82 } else if (kernel_uses_llsc) {
83 loongson_llsc_mb();
82 do { 84 do {
83 __asm__ __volatile__( 85 __asm__ __volatile__(
84 " .set push \n" 86 " .set push \n"
@@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
123 : "ir" (~(1UL << bit))); 125 : "ir" (~(1UL << bit)));
124#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 126#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
125 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 127 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
128 loongson_llsc_mb();
126 do { 129 do {
127 __asm__ __volatile__( 130 __asm__ __volatile__(
128 " " __LL "%0, %1 # clear_bit \n" 131 " " __LL "%0, %1 # clear_bit \n"
@@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
133 } while (unlikely(!temp)); 136 } while (unlikely(!temp));
134#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 137#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
135 } else if (kernel_uses_llsc) { 138 } else if (kernel_uses_llsc) {
139 loongson_llsc_mb();
136 do { 140 do {
137 __asm__ __volatile__( 141 __asm__ __volatile__(
138 " .set push \n" 142 " .set push \n"
@@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
193 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); 197 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
194 unsigned long temp; 198 unsigned long temp;
195 199
200 loongson_llsc_mb();
196 do { 201 do {
197 __asm__ __volatile__( 202 __asm__ __volatile__(
198 " .set push \n" 203 " .set push \n"
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index c14d798f3888..b83b0397462d 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -50,6 +50,7 @@
50 "i" (-EFAULT) \ 50 "i" (-EFAULT) \
51 : "memory"); \ 51 : "memory"); \
52 } else if (cpu_has_llsc) { \ 52 } else if (cpu_has_llsc) { \
53 loongson_llsc_mb(); \
53 __asm__ __volatile__( \ 54 __asm__ __volatile__( \
54 " .set push \n" \ 55 " .set push \n" \
55 " .set noat \n" \ 56 " .set noat \n" \
@@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
163 "i" (-EFAULT) 164 "i" (-EFAULT)
164 : "memory"); 165 : "memory");
165 } else if (cpu_has_llsc) { 166 } else if (cpu_has_llsc) {
167 loongson_llsc_mb();
166 __asm__ __volatile__( 168 __asm__ __volatile__(
167 "# futex_atomic_cmpxchg_inatomic \n" 169 "# futex_atomic_cmpxchg_inatomic \n"
168 " .set push \n" 170 " .set push \n"
@@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
192 : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 194 : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
193 "i" (-EFAULT) 195 "i" (-EFAULT)
194 : "memory"); 196 : "memory");
197 loongson_llsc_mb();
195 } else 198 } else
196 return -ENOSYS; 199 return -ENOSYS;
197 200
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
index c6b63a409641..6dd8ad2409dc 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -18,8 +18,6 @@
18#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) 18#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
19#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) 19#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
20 20
21#define MIPS_CPU_TIMER_IRQ 7
22
23#define MAX_IM 5 21#define MAX_IM 5
24 22
25#endif /* _FALCON_IRQ__ */ 23#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 141076325307..0b424214a5e9 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -19,8 +19,6 @@
19 19
20#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) 20#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
21 21
22#define MIPS_CPU_TIMER_IRQ 7
23
24#define MAX_IM 5 22#define MAX_IM 5
25 23
26#endif 24#endif
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 57933fc8fd98..910851c62db3 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -228,6 +228,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
228 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) 228 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
229 : [global] "r" (page_global)); 229 : [global] "r" (page_global));
230 } else if (kernel_uses_llsc) { 230 } else if (kernel_uses_llsc) {
231 loongson_llsc_mb();
231 __asm__ __volatile__ ( 232 __asm__ __volatile__ (
232 " .set push \n" 233 " .set push \n"
233 " .set "MIPS_ISA_ARCH_LEVEL" \n" 234 " .set "MIPS_ISA_ARCH_LEVEL" \n"
@@ -242,6 +243,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
242 " .set pop \n" 243 " .set pop \n"
243 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) 244 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
244 : [global] "r" (page_global)); 245 : [global] "r" (page_global));
246 loongson_llsc_mb();
245 } 247 }
246#else /* !CONFIG_SMP */ 248#else /* !CONFIG_SMP */
247 if (pte_none(*buddy)) 249 if (pte_none(*buddy))
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 6256d35dbf4d..bedb5047aff3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -74,14 +74,15 @@ static int __init vdma_init(void)
74 get_order(VDMA_PGTBL_SIZE)); 74 get_order(VDMA_PGTBL_SIZE));
75 BUG_ON(!pgtbl); 75 BUG_ON(!pgtbl);
76 dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); 76 dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
77 pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); 77 pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
78 78
79 /* 79 /*
80 * Clear the R4030 translation table 80 * Clear the R4030 translation table
81 */ 81 */
82 vdma_pgtbl_init(); 82 vdma_pgtbl_init();
83 83
84 r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); 84 r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
85 CPHYSADDR((unsigned long)pgtbl));
85 r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); 86 r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
86 r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); 87 r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
87 88
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 8f5bd04f320a..7f3f136572de 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -457,5 +457,5 @@ void mips_cm_error_report(void)
457 } 457 }
458 458
459 /* reprime cause register */ 459 /* reprime cause register */
460 write_gcr_error_cause(0); 460 write_gcr_error_cause(cm_error);
461} 461}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 6829a064aac8..339870ed92f7 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
371static int get_frame_info(struct mips_frame_info *info) 371static int get_frame_info(struct mips_frame_info *info)
372{ 372{
373 bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); 373 bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
374 union mips_instruction insn, *ip, *ip_end; 374 union mips_instruction insn, *ip;
375 const unsigned int max_insns = 128; 375 const unsigned int max_insns = 128;
376 unsigned int last_insn_size = 0; 376 unsigned int last_insn_size = 0;
377 unsigned int i; 377 unsigned int i;
@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
384 if (!ip) 384 if (!ip)
385 goto err; 385 goto err;
386 386
387 ip_end = (void *)ip + info->func_size; 387 for (i = 0; i < max_insns; i++) {
388
389 for (i = 0; i < max_insns && ip < ip_end; i++) {
390 ip = (void *)ip + last_insn_size; 388 ip = (void *)ip + last_insn_size;
389
391 if (is_mmips && mm_insn_16bit(ip->halfword[0])) { 390 if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
392 insn.word = ip->halfword[0] << 16; 391 insn.word = ip->halfword[0] << 16;
393 last_insn_size = 2; 392 last_insn_size = 2;
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f0bc3312ed11..6549499eb202 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
224 .irq_set_type = ltq_eiu_settype, 224 .irq_set_type = ltq_eiu_settype,
225}; 225};
226 226
227static void ltq_hw_irqdispatch(int module) 227static void ltq_hw_irq_handler(struct irq_desc *desc)
228{ 228{
229 int module = irq_desc_get_irq(desc) - 2;
229 u32 irq; 230 u32 irq;
231 int hwirq;
230 232
231 irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); 233 irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
232 if (irq == 0) 234 if (irq == 0)
@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
237 * other bits might be bogus 239 * other bits might be bogus
238 */ 240 */
239 irq = __fls(irq); 241 irq = __fls(irq);
240 do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); 242 hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
243 generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
241 244
242 /* if this is a EBU irq, we need to ack it or get a deadlock */ 245 /* if this is a EBU irq, we need to ack it or get a deadlock */
243 if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) 246 if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
245 LTQ_EBU_PCC_ISTAT); 248 LTQ_EBU_PCC_ISTAT);
246} 249}
247 250
248#define DEFINE_HWx_IRQDISPATCH(x) \
249 static void ltq_hw ## x ## _irqdispatch(void) \
250 { \
251 ltq_hw_irqdispatch(x); \
252 }
253DEFINE_HWx_IRQDISPATCH(0)
254DEFINE_HWx_IRQDISPATCH(1)
255DEFINE_HWx_IRQDISPATCH(2)
256DEFINE_HWx_IRQDISPATCH(3)
257DEFINE_HWx_IRQDISPATCH(4)
258
259#if MIPS_CPU_TIMER_IRQ == 7
260static void ltq_hw5_irqdispatch(void)
261{
262 do_IRQ(MIPS_CPU_TIMER_IRQ);
263}
264#else
265DEFINE_HWx_IRQDISPATCH(5)
266#endif
267
268static void ltq_hw_irq_handler(struct irq_desc *desc)
269{
270 ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
271}
272
273asmlinkage void plat_irq_dispatch(void)
274{
275 unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
276 int irq;
277
278 if (!pending) {
279 spurious_interrupt();
280 return;
281 }
282
283 pending >>= CAUSEB_IP;
284 while (pending) {
285 irq = fls(pending) - 1;
286 do_IRQ(MIPS_CPU_IRQ_BASE + irq);
287 pending &= ~BIT(irq);
288 }
289}
290
291static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) 251static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
292{ 252{
293 struct irq_chip *chip = &ltq_irq_type; 253 struct irq_chip *chip = &ltq_irq_type;
@@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
343 for (i = 0; i < MAX_IM; i++) 303 for (i = 0; i < MAX_IM; i++)
344 irq_set_chained_handler(i + 2, ltq_hw_irq_handler); 304 irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
345 305
346 if (cpu_has_vint) {
347 pr_info("Setting up vectored interrupts\n");
348 set_vi_handler(2, ltq_hw0_irqdispatch);
349 set_vi_handler(3, ltq_hw1_irqdispatch);
350 set_vi_handler(4, ltq_hw2_irqdispatch);
351 set_vi_handler(5, ltq_hw3_irqdispatch);
352 set_vi_handler(6, ltq_hw4_irqdispatch);
353 set_vi_handler(7, ltq_hw5_irqdispatch);
354 }
355
356 ltq_domain = irq_domain_add_linear(node, 306 ltq_domain = irq_domain_add_linear(node,
357 (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, 307 (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
358 &irq_domain_ops, 0); 308 &irq_domain_ops, 0);
359 309
360#ifndef CONFIG_MIPS_MT_SMP
361 set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
362 IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
363#else
364 set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
365 IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
366#endif
367
368 /* tell oprofile which irq to use */ 310 /* tell oprofile which irq to use */
369 ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); 311 ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
370 312
371 /*
372 * if the timer irq is not one of the mips irqs we need to
373 * create a mapping
374 */
375 if (MIPS_CPU_TIMER_IRQ != 7)
376 irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
377
378 /* the external interrupts are optional and xway only */ 313 /* the external interrupts are optional and xway only */
379 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); 314 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
380 if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { 315 if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
@@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
411 346
412unsigned int get_c0_compare_int(void) 347unsigned int get_c0_compare_int(void)
413{ 348{
414 return MIPS_CPU_TIMER_IRQ; 349 return CP0_LEGACY_COMPARE_IRQ;
415} 350}
416 351
417static struct of_device_id __initdata of_irq_ids[] = { 352static struct of_device_id __initdata of_irq_ids[] = {
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 982859f2b2a3..5e6a1a45cbd2 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -129,9 +129,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
129 unsigned long flags; 129 unsigned long flags;
130 130
131 ch->desc = 0; 131 ch->desc = 0;
132 ch->desc_base = dma_zalloc_coherent(ch->dev, 132 ch->desc_base = dma_alloc_coherent(ch->dev,
133 LTQ_DESC_NUM * LTQ_DESC_SIZE, 133 LTQ_DESC_NUM * LTQ_DESC_SIZE,
134 &ch->phys, GFP_ATOMIC); 134 &ch->phys, GFP_ATOMIC);
135 135
136 spin_lock_irqsave(&ltq_dma_lock, flags); 136 spin_lock_irqsave(&ltq_dma_lock, flags);
137 ltq_dma_w32(ch->nr, LTQ_DMA_CS); 137 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index 0fce4608aa88..c1a4d4dc4665 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
23endif 23endif
24 24
25cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap 25cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap
26
27#
28# Some versions of binutils, not currently mainline as of 2019/02/04, support
29# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
30# to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a
31# description).
32#
33# We disable this in order to prevent the assembler meddling with the
34# instruction that labels refer to, ie. if we label an ll instruction:
35#
36# 1: ll v0, 0(a0)
37#
38# ...then with the assembler fix applied the label may actually point at a sync
39# instruction inserted by the assembler, and if we were using the label in an
40# exception table the table would no longer contain the address of the ll
41# instruction.
42#
43# Avoid this by explicitly disabling that assembler behaviour. If upstream
44# binutils does not merge support for the flag then we can revisit & remove
45# this later - for now it ensures vendor toolchains don't cause problems.
46#
47cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
48
26# 49#
27# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a 50# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
28# as MIPS64 R2; older versions as just R1. This leaves the possibility open 51# as MIPS64 R2; older versions as just R1. This leaves the possibility open
diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c
index a60715e11306..b26892ce871c 100644
--- a/arch/mips/loongson64/common/reset.c
+++ b/arch/mips/loongson64/common/reset.c
@@ -59,7 +59,12 @@ static void loongson_poweroff(void)
59{ 59{
60#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE 60#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
61 mach_prepare_shutdown(); 61 mach_prepare_shutdown();
62 unreachable(); 62
63 /*
64 * It needs a wait loop here, but mips/kernel/reset.c already calls
65 * a generic delay loop, machine_hang(), so simply return.
66 */
67 return;
63#else 68#else
64 void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; 69 void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
65 70
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 37b1cb246332..65b6e85447b1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
932 * to mimic that here by taking a load/istream page 932 * to mimic that here by taking a load/istream page
933 * fault. 933 * fault.
934 */ 934 */
935 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
936 uasm_i_sync(p, 0);
935 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); 937 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
936 uasm_i_jr(p, ptr); 938 uasm_i_jr(p, ptr);
937 939
@@ -1646,6 +1648,8 @@ static void
1646iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1648iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1647{ 1649{
1648#ifdef CONFIG_SMP 1650#ifdef CONFIG_SMP
1651 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
1652 uasm_i_sync(p, 0);
1649# ifdef CONFIG_PHYS_ADDR_T_64BIT 1653# ifdef CONFIG_PHYS_ADDR_T_64BIT
1650 if (cpu_has_64bits) 1654 if (cpu_has_64bits)
1651 uasm_i_lld(p, pte, 0, ptr); 1655 uasm_i_lld(p, pte, 0, ptr);
@@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void)
2259#endif 2263#endif
2260 2264
2261 uasm_l_nopage_tlbl(&l, p); 2265 uasm_l_nopage_tlbl(&l, p);
2266 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2267 uasm_i_sync(&p, 0);
2262 build_restore_work_registers(&p); 2268 build_restore_work_registers(&p);
2263#ifdef CONFIG_CPU_MICROMIPS 2269#ifdef CONFIG_CPU_MICROMIPS
2264 if ((unsigned long)tlb_do_page_fault_0 & 1) { 2270 if ((unsigned long)tlb_do_page_fault_0 & 1) {
@@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void)
2313#endif 2319#endif
2314 2320
2315 uasm_l_nopage_tlbs(&l, p); 2321 uasm_l_nopage_tlbs(&l, p);
2322 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2323 uasm_i_sync(&p, 0);
2316 build_restore_work_registers(&p); 2324 build_restore_work_registers(&p);
2317#ifdef CONFIG_CPU_MICROMIPS 2325#ifdef CONFIG_CPU_MICROMIPS
2318 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2326 if ((unsigned long)tlb_do_page_fault_1 & 1) {
@@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void)
2368#endif 2376#endif
2369 2377
2370 uasm_l_nopage_tlbm(&l, p); 2378 uasm_l_nopage_tlbm(&l, p);
2379 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2380 uasm_i_sync(&p, 0);
2371 build_restore_work_registers(&p); 2381 build_restore_work_registers(&p);
2372#ifdef CONFIG_CPU_MICROMIPS 2382#ifdef CONFIG_CPU_MICROMIPS
2373 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2383 if ((unsigned long)tlb_do_page_fault_1 & 1) {
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index b16710a8a9e7..76e9bf88d3b9 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -79,8 +79,6 @@ enum reg_val_type {
79 REG_64BIT_32BIT, 79 REG_64BIT_32BIT,
80 /* 32-bit compatible, need truncation for 64-bit ops. */ 80 /* 32-bit compatible, need truncation for 64-bit ops. */
81 REG_32BIT, 81 REG_32BIT,
82 /* 32-bit zero extended. */
83 REG_32BIT_ZERO_EX,
84 /* 32-bit no sign/zero extension needed. */ 82 /* 32-bit no sign/zero extension needed. */
85 REG_32BIT_POS 83 REG_32BIT_POS
86}; 84};
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
343 const struct bpf_prog *prog = ctx->skf; 341 const struct bpf_prog *prog = ctx->skf;
344 int stack_adjust = ctx->stack_size; 342 int stack_adjust = ctx->stack_size;
345 int store_offset = stack_adjust - 8; 343 int store_offset = stack_adjust - 8;
344 enum reg_val_type td;
346 int r0 = MIPS_R_V0; 345 int r0 = MIPS_R_V0;
347 346
348 if (dest_reg == MIPS_R_RA && 347 if (dest_reg == MIPS_R_RA) {
349 get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
350 /* Don't let zero extended value escape. */ 348 /* Don't let zero extended value escape. */
351 emit_instr(ctx, sll, r0, r0, 0); 349 td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
350 if (td == REG_64BIT)
351 emit_instr(ctx, sll, r0, r0, 0);
352 }
352 353
353 if (ctx->flags & EBPF_SAVE_RA) { 354 if (ctx->flags & EBPF_SAVE_RA) {
354 emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); 355 emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
692 if (dst < 0) 693 if (dst < 0)
693 return dst; 694 return dst;
694 td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 695 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
695 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 696 if (td == REG_64BIT) {
696 /* sign extend */ 697 /* sign extend */
697 emit_instr(ctx, sll, dst, dst, 0); 698 emit_instr(ctx, sll, dst, dst, 0);
698 } 699 }
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
707 if (dst < 0) 708 if (dst < 0)
708 return dst; 709 return dst;
709 td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 710 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
710 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 711 if (td == REG_64BIT) {
711 /* sign extend */ 712 /* sign extend */
712 emit_instr(ctx, sll, dst, dst, 0); 713 emit_instr(ctx, sll, dst, dst, 0);
713 } 714 }
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
721 if (dst < 0) 722 if (dst < 0)
722 return dst; 723 return dst;
723 td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 724 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
724 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) 725 if (td == REG_64BIT)
725 /* sign extend */ 726 /* sign extend */
726 emit_instr(ctx, sll, dst, dst, 0); 727 emit_instr(ctx, sll, dst, dst, 0);
727 if (insn->imm == 1) { 728 if (insn->imm == 1) {
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
860 if (src < 0 || dst < 0) 861 if (src < 0 || dst < 0)
861 return -EINVAL; 862 return -EINVAL;
862 td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 863 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
863 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 864 if (td == REG_64BIT) {
864 /* sign extend */ 865 /* sign extend */
865 emit_instr(ctx, sll, dst, dst, 0); 866 emit_instr(ctx, sll, dst, dst, 0);
866 } 867 }
867 did_move = false; 868 did_move = false;
868 ts = get_reg_val_type(ctx, this_idx, insn->src_reg); 869 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
869 if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { 870 if (ts == REG_64BIT) {
870 int tmp_reg = MIPS_R_AT; 871 int tmp_reg = MIPS_R_AT;
871 872
872 if (bpf_op == BPF_MOV) { 873 if (bpf_op == BPF_MOV) {
@@ -1254,8 +1255,7 @@ jeq_common:
1254 if (insn->imm == 64 && td == REG_32BIT) 1255 if (insn->imm == 64 && td == REG_32BIT)
1255 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 1256 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
1256 1257
1257 if (insn->imm != 64 && 1258 if (insn->imm != 64 && td == REG_64BIT) {
1258 (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
1259 /* sign extend */ 1259 /* sign extend */
1260 emit_instr(ctx, sll, dst, dst, 0); 1260 emit_instr(ctx, sll, dst, dst, 0);
1261 } 1261 }
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb849b10e..288b58b00dc8 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
369 int irq; 369 int irq;
370 struct irq_chip *msi; 370 struct irq_chip *msi;
371 371
372 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { 372 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
373 return 0;
374 } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
373 msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; 375 msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
374 msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; 376 msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
375 msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; 377 msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 5017d5843c5a..fc29b85cfa92 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
568 if (octeon_has_feature(OCTEON_FEATURE_PCIE)) 568 if (octeon_has_feature(OCTEON_FEATURE_PCIE))
569 return 0; 569 return 0;
570 570
571 if (!octeon_is_pci_host()) {
572 pr_notice("Not in host mode, PCI Controller not initialized\n");
573 return 0;
574 }
575
571 /* Point pcibios_map_irq() to the PCI version of it */ 576 /* Point pcibios_map_irq() to the PCI version of it */
572 octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; 577 octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
573 578
@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
579 else 584 else
580 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; 585 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
581 586
582 if (!octeon_is_pci_host()) {
583 pr_notice("Not in host mode, PCI Controller not initialized\n");
584 return 0;
585 }
586
587 /* PCI I/O and PCI MEM values */ 587 /* PCI I/O and PCI MEM values */
588 set_io_port_base(OCTEON_PCI_IOSPACE_BASE); 588 set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
589 ioport_resource.start = 0; 589 ioport_resource.start = 0;
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index f6fd340e39c2..0ede4deb8181 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -8,6 +8,7 @@ ccflags-vdso := \
8 $(filter -E%,$(KBUILD_CFLAGS)) \ 8 $(filter -E%,$(KBUILD_CFLAGS)) \
9 $(filter -mmicromips,$(KBUILD_CFLAGS)) \ 9 $(filter -mmicromips,$(KBUILD_CFLAGS)) \
10 $(filter -march=%,$(KBUILD_CFLAGS)) \ 10 $(filter -march=%,$(KBUILD_CFLAGS)) \
11 $(filter -m%-float,$(KBUILD_CFLAGS)) \
11 -D__VDSO__ 12 -D__VDSO__
12 13
13ifdef CONFIG_CC_IS_CLANG 14ifdef CONFIG_CC_IS_CLANG
@@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
129 $(call cmd,force_checksrc) 130 $(call cmd,force_checksrc)
130 $(call if_changed_rule,cc_o_c) 131 $(call if_changed_rule,cc_o_c)
131 132
132$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 133$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
133$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE 134$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
134 $(call if_changed_dep,cpp_lds_S) 135 $(call if_changed_dep,cpp_lds_S)
135 136
@@ -169,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
169 $(call cmd,force_checksrc) 170 $(call cmd,force_checksrc)
170 $(call if_changed_rule,cc_o_c) 171 $(call if_changed_rule,cc_o_c)
171 172
172$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 173$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
173$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE 174$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
174 $(call if_changed_dep,cpp_lds_S) 175 $(call if_changed_dep,cpp_lds_S)
175 176
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 0a935c136ec2..ac3482882cf9 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
3 3
4KBUILD_DEFCONFIG := defconfig 4KBUILD_DEFCONFIG := defconfig
5 5
6comma = ,
7
8
9ifdef CONFIG_FUNCTION_TRACER 6ifdef CONFIG_FUNCTION_TRACER
10arch-y += -malways-save-lp -mno-relax 7arch-y += -malways-save-lp -mno-relax
11endif 8endif
@@ -54,8 +51,6 @@ endif
54boot := arch/nds32/boot 51boot := arch/nds32/boot
55core-y += $(boot)/dts/ 52core-y += $(boot)/dts/
56 53
57.PHONY: FORCE
58
59Image: vmlinux 54Image: vmlinux
60 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 55 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
61 56
@@ -68,9 +63,6 @@ prepare: vdso_prepare
68vdso_prepare: prepare0 63vdso_prepare: prepare0
69 $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h 64 $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h
70 65
71CLEAN_FILES += include/asm-nds32/constants.h*
72
73# We use MRPROPER_FILES and CLEAN_FILES now
74archclean: 66archclean:
75 $(Q)$(MAKE) $(clean)=$(boot) 67 $(Q)$(MAKE) $(clean)=$(boot)
76 68
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 70e06d34006c..bf10141c7426 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -20,7 +20,6 @@
20KBUILD_DEFCONFIG := or1ksim_defconfig 20KBUILD_DEFCONFIG := or1ksim_defconfig
21 21
22OBJCOPYFLAGS := -O binary -R .note -R .comment -S 22OBJCOPYFLAGS := -O binary -R .note -R .comment -S
23LDFLAGS_vmlinux :=
24LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 23LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
25 24
26KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ 25KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
@@ -50,5 +49,3 @@ else
50BUILTIN_DTB := n 49BUILTIN_DTB := n
51endif 50endif
52core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ 51core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
53
54all: vmlinux
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index eb87cd8327c8..1f04844b6b82 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -34,6 +34,7 @@ generic-y += qrwlock_types.h
34generic-y += qrwlock.h 34generic-y += qrwlock.h
35generic-y += sections.h 35generic-y += sections.h
36generic-y += segment.h 36generic-y += segment.h
37generic-y += shmparam.h
37generic-y += string.h 38generic-y += string.h
38generic-y += switch_to.h 39generic-y += switch_to.h
39generic-y += topology.h 40generic-y += topology.h
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index bc8191a34db7..a44682c8adc3 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -58,8 +58,12 @@
58/* Ensure that addr is below task's addr_limit */ 58/* Ensure that addr is below task's addr_limit */
59#define __addr_ok(addr) ((unsigned long) addr < get_fs()) 59#define __addr_ok(addr) ((unsigned long) addr < get_fs())
60 60
61#define access_ok(addr, size) \ 61#define access_ok(addr, size) \
62 __range_ok((unsigned long)addr, (unsigned long)size) 62({ \
63 unsigned long __ao_addr = (unsigned long)(addr); \
64 unsigned long __ao_size = (unsigned long)(size); \
65 __range_ok(__ao_addr, __ao_size); \
66})
63 67
64/* 68/*
65 * These are the main single-value transfer routines. They automatically 69 * These are the main single-value transfer routines. They automatically
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 2582df1c529b..0964c236e3e5 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
308 308
309long do_syscall_trace_enter(struct pt_regs *regs) 309long do_syscall_trace_enter(struct pt_regs *regs)
310{ 310{
311 if (test_thread_flag(TIF_SYSCALL_TRACE) && 311 if (test_thread_flag(TIF_SYSCALL_TRACE)) {
312 tracehook_report_syscall_entry(regs)) { 312 int rc = tracehook_report_syscall_entry(regs);
313
313 /* 314 /*
314 * Tracing decided this syscall should not happen or the 315 * As tracesys_next does not set %r28 to -ENOSYS
315 * debugger stored an invalid system call number. Skip 316 * when %r20 is set to -1, initialize it here.
316 * the system call and the system call restart handling.
317 */ 317 */
318 regs->gr[20] = -1UL; 318 regs->gr[28] = -ENOSYS;
319 goto out; 319
320 if (rc) {
321 /*
322 * A nonzero return code from
323 * tracehook_report_syscall_entry() tells us
324 * to prevent the syscall execution. Skip
325 * the syscall call and the syscall restart handling.
326 *
327 * Note that the tracer may also just change
328 * regs->gr[20] to an invalid syscall number,
329 * that is handled by tracesys_next.
330 */
331 regs->gr[20] = -1UL;
332 return -1;
333 }
320 } 334 }
321 335
322 /* Do the secure computing check after ptrace. */ 336 /* Do the secure computing check after ptrace. */
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
340 regs->gr[24] & 0xffffffff, 354 regs->gr[24] & 0xffffffff,
341 regs->gr[23] & 0xffffffff); 355 regs->gr[23] & 0xffffffff);
342 356
343out:
344 /* 357 /*
345 * Sign extend the syscall number to 64bit since it may have been 358 * Sign extend the syscall number to 64bit since it may have been
346 * modified by a compat ptrace call 359 * modified by a compat ptrace call
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 2e6ada28da64..d8c8d7c9df15 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud)
904 904
905static inline int pud_present(pud_t pud) 905static inline int pud_present(pud_t pud)
906{ 906{
907 return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); 907 return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
908} 908}
909 909
910extern struct page *pud_page(pud_t pud); 910extern struct page *pud_page(pud_t pud);
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd)
951 951
952static inline int pgd_present(pgd_t pgd) 952static inline int pgd_present(pgd_t pgd)
953{ 953{
954 return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); 954 return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
955} 955}
956 956
957static inline pte_t pgd_pte(pgd_t pgd) 957static inline pte_t pgd_pte(pgd_t pgd)
@@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1258 1258
1259#define pmd_move_must_withdraw pmd_move_must_withdraw 1259#define pmd_move_must_withdraw pmd_move_must_withdraw
1260struct spinlock; 1260struct spinlock;
1261static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, 1261extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
1262 struct spinlock *old_pmd_ptl, 1262 struct spinlock *old_pmd_ptl,
1263 struct vm_area_struct *vma) 1263 struct vm_area_struct *vma);
1264{ 1264/*
1265 if (radix_enabled()) 1265 * Hash translation mode use the deposited table to store hash pte
1266 return false; 1266 * slot information.
1267 /* 1267 */
1268 * Archs like ppc64 use pgtable to store per pmd
1269 * specific information. So when we switch the pmd,
1270 * we should also withdraw and deposit the pgtable
1271 */
1272 return true;
1273}
1274
1275
1276#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit 1268#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
1277static inline bool arch_needs_pgtable_deposit(void) 1269static inline bool arch_needs_pgtable_deposit(void)
1278{ 1270{
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
47 PERF_REG_POWERPC_DAR, 47 PERF_REG_POWERPC_DAR,
48 PERF_REG_POWERPC_DSISR, 48 PERF_REG_POWERPC_DSISR,
49 PERF_REG_POWERPC_SIER, 49 PERF_REG_POWERPC_SIER,
50 PERF_REG_POWERPC_MMCRA,
50 PERF_REG_POWERPC_MAX, 51 PERF_REG_POWERPC_MAX,
51}; 52};
52#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ 53#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 57deb1e9ffea..20cc816b3508 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -852,11 +852,12 @@ start_here:
852 852
853 /* set up the PTE pointers for the Abatron bdiGDB. 853 /* set up the PTE pointers for the Abatron bdiGDB.
854 */ 854 */
855 tovirt(r6,r6)
856 lis r5, abatron_pteptrs@h 855 lis r5, abatron_pteptrs@h
857 ori r5, r5, abatron_pteptrs@l 856 ori r5, r5, abatron_pteptrs@l
858 stw r5, 0xf0(0) /* Must match your Abatron config file */ 857 stw r5, 0xf0(0) /* Must match your Abatron config file */
859 tophys(r5,r5) 858 tophys(r5,r5)
859 lis r6, swapper_pg_dir@h
860 ori r6, r6, swapper_pg_dir@l
860 stw r6, 0(r5) 861 stw r6, 0(r5)
861 862
862/* Now turn on the MMU for real! */ 863/* Now turn on the MMU for real! */
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index bd5e6834ca69..6794466f6420 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn)
755 if (restore_tm_sigcontexts(current, &uc->uc_mcontext, 755 if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
756 &uc_transact->uc_mcontext)) 756 &uc_transact->uc_mcontext))
757 goto badframe; 757 goto badframe;
758 } 758 } else
759#endif 759#endif
760 /* Fall through, for non-TM restore */ 760 {
761 if (!MSR_TM_ACTIVE(msr)) {
762 /* 761 /*
762 * Fall through, for non-TM restore
763 *
763 * Unset MSR[TS] on the thread regs since MSR from user 764 * Unset MSR[TS] on the thread regs since MSR from user
764 * context does not have MSR active, and recheckpoint was 765 * context does not have MSR active, and recheckpoint was
765 * not called since restore_tm_sigcontexts() was not called 766 * not called since restore_tm_sigcontexts() was not called
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 29746dc28df5..517662a56bdc 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -967,13 +967,6 @@ out:
967} 967}
968#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 968#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
969 969
970#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
971unsigned long __init arch_syscall_addr(int nr)
972{
973 return sys_call_table[nr*2];
974}
975#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
976
977#ifdef PPC64_ELF_ABI_v1 970#ifdef PPC64_ELF_ABI_v1
978char *arch_ftrace_match_adjust(char *str, const char *search) 971char *arch_ftrace_match_adjust(char *str, const char *search)
979{ 972{
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index f3c31f5e1026..ecd31569a120 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -400,3 +400,25 @@ void arch_report_meminfo(struct seq_file *m)
400 atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); 400 atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
401} 401}
402#endif /* CONFIG_PROC_FS */ 402#endif /* CONFIG_PROC_FS */
403
404/*
405 * For hash translation mode, we use the deposited table to store hash slot
406 * information and they are stored at PTRS_PER_PMD offset from related pmd
407 * location. Hence a pmd move requires deposit and withdraw.
408 *
409 * For radix translation with split pmd ptl, we store the deposited table in the
410 * pmd page. Hence if we have different pmd page we need to withdraw during pmd
411 * move.
412 *
413 * With hash we use deposited table always irrespective of anon or not.
414 * With radix we use deposited table only for anonymous mapping.
415 */
416int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
417 struct spinlock *old_pmd_ptl,
418 struct vm_area_struct *vma)
419{
420 if (radix_enabled())
421 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
422
423 return true;
424}
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 5c36b3a8d47a..3349f3f8fe84 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
70 PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), 70 PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
71 PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), 71 PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
72 PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), 72 PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
73 PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
73}; 74};
74 75
75u64 perf_reg_value(struct pt_regs *regs, int idx) 76u64 perf_reg_value(struct pt_regs *regs, int idx)
@@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
83 !is_sier_available())) 84 !is_sier_available()))
84 return 0; 85 return 0;
85 86
87 if (idx == PERF_REG_POWERPC_MMCRA &&
88 (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
89 IS_ENABLED(CONFIG_PPC32)))
90 return 0;
91
86 return regs_get_register(regs, pt_regs_offset[idx]); 92 return regs_get_register(regs, pt_regs_offset[idx]);
87} 93}
88 94
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
index a1aaa1569d7c..f0e488d97567 100644
--- a/arch/powerpc/platforms/4xx/ocm.c
+++ b/arch/powerpc/platforms/4xx/ocm.c
@@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
237 continue; 237 continue;
238 238
239 seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); 239 seq_printf(m, "PPC4XX OCM : %d\n", ocm->index);
240 seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys)); 240 seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys));
241 seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); 241 seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
242 seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); 242 seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
243 seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); 243 seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal);
244 244
245 seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys)); 245 seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys));
246 seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); 246 seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
247 seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); 247 seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal);
248 seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); 248 seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree);
@@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
252 blk->size, blk->owner); 252 blk->size, blk->owner);
253 } 253 }
254 254
255 seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys)); 255 seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys));
256 seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); 256 seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
257 seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); 257 seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
258 seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); 258 seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index e66644e0fb40..9438fa0fc355 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void)
538 /* see if there is a keyboard in the device tree 538 /* see if there is a keyboard in the device tree
539 with a parent of type "adb" */ 539 with a parent of type "adb" */
540 for_each_node_by_name(kbd, "keyboard") 540 for_each_node_by_name(kbd, "keyboard")
541 if (kbd->parent && kbd->parent->type 541 if (of_node_is_type(kbd->parent, "adb"))
542 && strcmp(kbd->parent->type, "adb") == 0)
543 break; 542 break;
544 of_node_put(kbd); 543 of_node_put(kbd);
545 if (kbd) 544 if (kbd)
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index d18d16489a15..bdf9b716e848 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -255,7 +255,7 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
255 255
256 chan->ring_size = ring_size; 256 chan->ring_size = ring_size;
257 257
258 chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev, 258 chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
259 ring_size * sizeof(u64), 259 ring_size * sizeof(u64),
260 &chan->ring_dma, GFP_KERNEL); 260 &chan->ring_dma, GFP_KERNEL);
261 261
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index d7f742ed48ba..3f58c7dbd581 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
564 } 564 }
565 } else { 565 } else {
566 /* Create a group for 1 GPU and attached NPUs for POWER8 */ 566 /* Create a group for 1 GPU and attached NPUs for POWER8 */
567 pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL); 567 pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
568 table_group = &pe->npucomp->table_group; 568 table_group = &pe->npucomp->table_group;
569 table_group->ops = &pnv_npu_peers_ops; 569 table_group->ops = &pnv_npu_peers_ops;
570 iommu_register_group(table_group, hose->global_number, 570 iommu_register_group(table_group, hose->global_number,
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 1d6406a051f1..145373f0e5dc 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
1593 1593
1594 pnv_pci_ioda2_setup_dma_pe(phb, pe); 1594 pnv_pci_ioda2_setup_dma_pe(phb, pe);
1595#ifdef CONFIG_IOMMU_API 1595#ifdef CONFIG_IOMMU_API
1596 iommu_register_group(&pe->table_group,
1597 pe->phb->hose->global_number, pe->pe_number);
1596 pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); 1598 pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
1597#endif 1599#endif
1598 } 1600 }
@@ -2681,7 +2683,8 @@ static void pnv_pci_ioda_setup_iommu_api(void)
2681 list_for_each_entry(hose, &hose_list, list_node) { 2683 list_for_each_entry(hose, &hose_list, list_node) {
2682 phb = hose->private_data; 2684 phb = hose->private_data;
2683 2685
2684 if (phb->type == PNV_PHB_NPU_NVLINK) 2686 if (phb->type == PNV_PHB_NPU_NVLINK ||
2687 phb->type == PNV_PHB_NPU_OCAPI)
2685 continue; 2688 continue;
2686 2689
2687 list_for_each_entry(pe, &phb->ioda.pe_list, list) { 2690 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 45fb70b4bfa7..ef9448a907c6 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
1147 return 0; 1147 return 0;
1148 1148
1149 pe = &phb->ioda.pe_array[pdn->pe_number]; 1149 pe = &phb->ioda.pe_array[pdn->pe_number];
1150 if (!pe->table_group.group)
1151 return 0;
1150 iommu_add_device(&pe->table_group, dev); 1152 iommu_add_device(&pe->table_group, dev);
1151 return 0; 1153 return 0;
1152 case BUS_NOTIFY_DEL_DEVICE: 1154 case BUS_NOTIFY_DEL_DEVICE:
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 7d6457ab5d34..bba281b1fe1b 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
43{ 43{
44 unsigned long ret[PLPAR_HCALL_BUFSIZE]; 44 unsigned long ret[PLPAR_HCALL_BUFSIZE];
45 uint64_t rc, token; 45 uint64_t rc, token;
46 uint64_t saved = 0;
46 47
47 /* 48 /*
48 * When the hypervisor cannot map all the requested memory in a single 49 * When the hypervisor cannot map all the requested memory in a single
@@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
56 rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, 57 rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
57 p->blocks, BIND_ANY_ADDR, token); 58 p->blocks, BIND_ANY_ADDR, token);
58 token = ret[0]; 59 token = ret[0];
60 if (!saved)
61 saved = ret[1];
59 cond_resched(); 62 cond_resched();
60 } while (rc == H_BUSY); 63 } while (rc == H_BUSY);
61 64
@@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
64 return -ENXIO; 67 return -ENXIO;
65 } 68 }
66 69
67 p->bound_addr = ret[1]; 70 p->bound_addr = saved;
68 71
69 dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); 72 dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
70 73
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 7725825d887d..37a77e57893e 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void)
264 if (!of_device_is_compatible(nvdn->parent, 264 if (!of_device_is_compatible(nvdn->parent,
265 "ibm,power9-npu")) 265 "ibm,power9-npu"))
266 continue; 266 continue;
267#ifdef CONFIG_PPC_POWERNV
267 WARN_ON_ONCE(pnv_npu2_init(hose)); 268 WARN_ON_ONCE(pnv_npu2_init(hose));
269#endif
268 break; 270 break;
269 } 271 }
270 } 272 }
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index 8b0ebf3940d2..ebed46f80254 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -756,9 +756,10 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
756 } 756 }
757 757
758 /* Initialize outbound message descriptor ring */ 758 /* Initialize outbound message descriptor ring */
759 rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev, 759 rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
760 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, 760 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
761 &rmu->msg_tx_ring.phys, GFP_KERNEL); 761 &rmu->msg_tx_ring.phys,
762 GFP_KERNEL);
762 if (!rmu->msg_tx_ring.virt) { 763 if (!rmu->msg_tx_ring.virt) {
763 rc = -ENOMEM; 764 rc = -ENOMEM;
764 goto out_dma; 765 goto out_dma;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index e0d7d61779a6..515fc3cc9687 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -28,11 +28,13 @@ config RISCV
28 select GENERIC_STRNLEN_USER 28 select GENERIC_STRNLEN_USER
29 select GENERIC_SMP_IDLE_THREAD 29 select GENERIC_SMP_IDLE_THREAD
30 select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A 30 select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
31 select HAVE_ARCH_AUDITSYSCALL
31 select HAVE_MEMBLOCK_NODE_MAP 32 select HAVE_MEMBLOCK_NODE_MAP
32 select HAVE_DMA_CONTIGUOUS 33 select HAVE_DMA_CONTIGUOUS
33 select HAVE_FUTEX_CMPXCHG if FUTEX 34 select HAVE_FUTEX_CMPXCHG if FUTEX
34 select HAVE_GENERIC_DMA_COHERENT 35 select HAVE_GENERIC_DMA_COHERENT
35 select HAVE_PERF_EVENTS 36 select HAVE_PERF_EVENTS
37 select HAVE_SYSCALL_TRACEPOINTS
36 select IRQ_DOMAIN 38 select IRQ_DOMAIN
37 select RISCV_ISA_A if SMP 39 select RISCV_ISA_A if SMP
38 select SPARSE_IRQ 40 select SPARSE_IRQ
@@ -40,6 +42,7 @@ config RISCV
40 select HAVE_ARCH_TRACEHOOK 42 select HAVE_ARCH_TRACEHOOK
41 select HAVE_PCI 43 select HAVE_PCI
42 select MODULES_USE_ELF_RELA if MODULES 44 select MODULES_USE_ELF_RELA if MODULES
45 select MODULE_SECTIONS if MODULES
43 select THREAD_INFO_IN_TASK 46 select THREAD_INFO_IN_TASK
44 select PCI_DOMAINS_GENERIC if PCI 47 select PCI_DOMAINS_GENERIC if PCI
45 select PCI_MSI if PCI 48 select PCI_MSI if PCI
@@ -100,7 +103,7 @@ choice
100 prompt "Base ISA" 103 prompt "Base ISA"
101 default ARCH_RV64I 104 default ARCH_RV64I
102 help 105 help
103 This selects the base ISA that this kernel will traget and must match 106 This selects the base ISA that this kernel will target and must match
104 the target platform. 107 the target platform.
105 108
106config ARCH_RV32I 109config ARCH_RV32I
@@ -152,7 +155,6 @@ choice
152 bool "2GiB" 155 bool "2GiB"
153 config MAXPHYSMEM_128GB 156 config MAXPHYSMEM_128GB
154 depends on 64BIT && CMODEL_MEDANY 157 depends on 64BIT && CMODEL_MEDANY
155 select MODULE_SECTIONS if MODULES
156 bool "128GiB" 158 bool "128GiB"
157endchoice 159endchoice
158 160
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index f399659d3b8d..2fd3461e50ab 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -13,8 +13,6 @@ CONFIG_BLK_DEV_INITRD=y
13CONFIG_EXPERT=y 13CONFIG_EXPERT=y
14CONFIG_BPF_SYSCALL=y 14CONFIG_BPF_SYSCALL=y
15CONFIG_SMP=y 15CONFIG_SMP=y
16CONFIG_PCI=y
17CONFIG_PCIE_XILINX=y
18CONFIG_MODULES=y 16CONFIG_MODULES=y
19CONFIG_MODULE_UNLOAD=y 17CONFIG_MODULE_UNLOAD=y
20CONFIG_NET=y 18CONFIG_NET=y
@@ -28,6 +26,10 @@ CONFIG_IP_PNP_DHCP=y
28CONFIG_IP_PNP_BOOTP=y 26CONFIG_IP_PNP_BOOTP=y
29CONFIG_IP_PNP_RARP=y 27CONFIG_IP_PNP_RARP=y
30CONFIG_NETLINK_DIAG=y 28CONFIG_NETLINK_DIAG=y
29CONFIG_PCI=y
30CONFIG_PCIEPORTBUS=y
31CONFIG_PCI_HOST_GENERIC=y
32CONFIG_PCIE_XILINX=y
31CONFIG_DEVTMPFS=y 33CONFIG_DEVTMPFS=y
32CONFIG_BLK_DEV_LOOP=y 34CONFIG_BLK_DEV_LOOP=y
33CONFIG_VIRTIO_BLK=y 35CONFIG_VIRTIO_BLK=y
@@ -63,7 +65,6 @@ CONFIG_USB_STORAGE=y
63CONFIG_USB_UAS=y 65CONFIG_USB_UAS=y
64CONFIG_VIRTIO_MMIO=y 66CONFIG_VIRTIO_MMIO=y
65CONFIG_SIFIVE_PLIC=y 67CONFIG_SIFIVE_PLIC=y
66CONFIG_RAS=y
67CONFIG_EXT4_FS=y 68CONFIG_EXT4_FS=y
68CONFIG_EXT4_FS_POSIX_ACL=y 69CONFIG_EXT4_FS_POSIX_ACL=y
69CONFIG_AUTOFS4_FS=y 70CONFIG_AUTOFS4_FS=y
@@ -77,5 +78,6 @@ CONFIG_NFS_V4_1=y
77CONFIG_NFS_V4_2=y 78CONFIG_NFS_V4_2=y
78CONFIG_ROOT_NFS=y 79CONFIG_ROOT_NFS=y
79CONFIG_CRYPTO_USER_API_HASH=y 80CONFIG_CRYPTO_USER_API_HASH=y
81CONFIG_CRYPTO_DEV_VIRTIO=y
80CONFIG_PRINTK_TIME=y 82CONFIG_PRINTK_TIME=y
81# CONFIG_RCU_TRACE is not set 83# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
index cd2af4b013e3..46202dad365d 100644
--- a/arch/riscv/include/asm/module.h
+++ b/arch/riscv/include/asm/module.h
@@ -9,12 +9,12 @@
9#define MODULE_ARCH_VERMAGIC "riscv" 9#define MODULE_ARCH_VERMAGIC "riscv"
10 10
11struct module; 11struct module;
12u64 module_emit_got_entry(struct module *mod, u64 val); 12unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
13u64 module_emit_plt_entry(struct module *mod, u64 val); 13unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);
14 14
15#ifdef CONFIG_MODULE_SECTIONS 15#ifdef CONFIG_MODULE_SECTIONS
16struct mod_section { 16struct mod_section {
17 struct elf64_shdr *shdr; 17 Elf_Shdr *shdr;
18 int num_entries; 18 int num_entries;
19 int max_entries; 19 int max_entries;
20}; 20};
@@ -26,18 +26,18 @@ struct mod_arch_specific {
26}; 26};
27 27
28struct got_entry { 28struct got_entry {
29 u64 symbol_addr; /* the real variable address */ 29 unsigned long symbol_addr; /* the real variable address */
30}; 30};
31 31
32static inline struct got_entry emit_got_entry(u64 val) 32static inline struct got_entry emit_got_entry(unsigned long val)
33{ 33{
34 return (struct got_entry) {val}; 34 return (struct got_entry) {val};
35} 35}
36 36
37static inline struct got_entry *get_got_entry(u64 val, 37static inline struct got_entry *get_got_entry(unsigned long val,
38 const struct mod_section *sec) 38 const struct mod_section *sec)
39{ 39{
40 struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr; 40 struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr);
41 int i; 41 int i;
42 for (i = 0; i < sec->num_entries; i++) { 42 for (i = 0; i < sec->num_entries; i++) {
43 if (got[i].symbol_addr == val) 43 if (got[i].symbol_addr == val)
@@ -62,7 +62,9 @@ struct plt_entry {
62#define REG_T0 0x5 62#define REG_T0 0x5
63#define REG_T1 0x6 63#define REG_T1 0x6
64 64
65static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt) 65static inline struct plt_entry emit_plt_entry(unsigned long val,
66 unsigned long plt,
67 unsigned long got_plt)
66{ 68{
67 /* 69 /*
68 * U-Type encoding: 70 * U-Type encoding:
@@ -76,7 +78,7 @@ static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
76 * +------------+------------+--------+----------+----------+ 78 * +------------+------------+--------+----------+----------+
77 * 79 *
78 */ 80 */
79 u64 offset = got_plt - plt; 81 unsigned long offset = got_plt - plt;
80 u32 hi20 = (offset + 0x800) & 0xfffff000; 82 u32 hi20 = (offset + 0x800) & 0xfffff000;
81 u32 lo12 = (offset - hi20); 83 u32 lo12 = (offset - hi20);
82 return (struct plt_entry) { 84 return (struct plt_entry) {
@@ -86,7 +88,7 @@ static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
86 }; 88 };
87} 89}
88 90
89static inline int get_got_plt_idx(u64 val, const struct mod_section *sec) 91static inline int get_got_plt_idx(unsigned long val, const struct mod_section *sec)
90{ 92{
91 struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr; 93 struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
92 int i; 94 int i;
@@ -97,9 +99,9 @@ static inline int get_got_plt_idx(u64 val, const struct mod_section *sec)
97 return -1; 99 return -1;
98} 100}
99 101
100static inline struct plt_entry *get_plt_entry(u64 val, 102static inline struct plt_entry *get_plt_entry(unsigned long val,
101 const struct mod_section *sec_plt, 103 const struct mod_section *sec_plt,
102 const struct mod_section *sec_got_plt) 104 const struct mod_section *sec_got_plt)
103{ 105{
104 struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr; 106 struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
105 int got_plt_idx = get_got_plt_idx(val, sec_got_plt); 107 int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 06cfbb3aacbb..2a546a52f02a 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -80,7 +80,7 @@ typedef struct page *pgtable_t;
80#define __pgd(x) ((pgd_t) { (x) }) 80#define __pgd(x) ((pgd_t) { (x) })
81#define __pgprot(x) ((pgprot_t) { (x) }) 81#define __pgprot(x) ((pgprot_t) { (x) })
82 82
83#ifdef CONFIG_64BITS 83#ifdef CONFIG_64BIT
84#define PTE_FMT "%016lx" 84#define PTE_FMT "%016lx"
85#else 85#else
86#define PTE_FMT "%08lx" 86#define PTE_FMT "%08lx"
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index 2fa2942be221..470755cb7558 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -35,6 +35,12 @@
35#define _PAGE_SPECIAL _PAGE_SOFT 35#define _PAGE_SPECIAL _PAGE_SOFT
36#define _PAGE_TABLE _PAGE_PRESENT 36#define _PAGE_TABLE _PAGE_PRESENT
37 37
38/*
39 * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
40 * distinguish them from swapped out pages
41 */
42#define _PAGE_PROT_NONE _PAGE_READ
43
38#define _PAGE_PFN_SHIFT 10 44#define _PAGE_PFN_SHIFT 10
39 45
40/* Set of bits to preserve across pte_modify() */ 46/* Set of bits to preserve across pte_modify() */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 16301966d65b..a8179a8c1491 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -44,7 +44,7 @@
44/* Page protection bits */ 44/* Page protection bits */
45#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) 45#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
46 46
47#define PAGE_NONE __pgprot(0) 47#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
48#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) 48#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
49#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) 49#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
50#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) 50#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
98 98
99static inline int pmd_present(pmd_t pmd) 99static inline int pmd_present(pmd_t pmd)
100{ 100{
101 return (pmd_val(pmd) & _PAGE_PRESENT); 101 return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
102} 102}
103 103
104static inline int pmd_none(pmd_t pmd) 104static inline int pmd_none(pmd_t pmd)
@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
178 178
179static inline int pte_present(pte_t pte) 179static inline int pte_present(pte_t pte)
180{ 180{
181 return (pte_val(pte) & _PAGE_PRESENT); 181 return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
182} 182}
183 183
184static inline int pte_none(pte_t pte) 184static inline int pte_none(pte_t pte)
@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
380 * 380 *
381 * Format of swap PTE: 381 * Format of swap PTE:
382 * bit 0: _PAGE_PRESENT (zero) 382 * bit 0: _PAGE_PRESENT (zero)
383 * bit 1: reserved for future use (zero) 383 * bit 1: _PAGE_PROT_NONE (zero)
384 * bits 2 to 6: swap type 384 * bits 2 to 6: swap type
385 * bits 7 to XLEN-1: swap offset 385 * bits 7 to XLEN-1: swap offset
386 */ 386 */
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 0531f49af5c3..ce70bceb8872 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -22,7 +22,7 @@
22 * This decides where the kernel will search for a free chunk of vm 22 * This decides where the kernel will search for a free chunk of vm
23 * space during mmap's. 23 * space during mmap's.
24 */ 24 */
25#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1) 25#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
26 26
27#define STACK_TOP TASK_SIZE 27#define STACK_TOP TASK_SIZE
28#define STACK_TOP_MAX STACK_TOP 28#define STACK_TOP_MAX STACK_TOP
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index bbe1862e8f80..d35ec2f41381 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -113,6 +113,11 @@ static inline void frame_pointer_set(struct pt_regs *regs,
113 SET_FP(regs, val); 113 SET_FP(regs, val);
114} 114}
115 115
116static inline unsigned long regs_return_value(struct pt_regs *regs)
117{
118 return regs->a0;
119}
120
116#endif /* __ASSEMBLY__ */ 121#endif /* __ASSEMBLY__ */
117 122
118#endif /* _ASM_RISCV_PTRACE_H */ 123#endif /* _ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 8d25f8904c00..bba3da6ef157 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -18,6 +18,7 @@
18#ifndef _ASM_RISCV_SYSCALL_H 18#ifndef _ASM_RISCV_SYSCALL_H
19#define _ASM_RISCV_SYSCALL_H 19#define _ASM_RISCV_SYSCALL_H
20 20
21#include <uapi/linux/audit.h>
21#include <linux/sched.h> 22#include <linux/sched.h>
22#include <linux/err.h> 23#include <linux/err.h>
23 24
@@ -99,4 +100,13 @@ static inline void syscall_set_arguments(struct task_struct *task,
99 memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); 100 memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
100} 101}
101 102
103static inline int syscall_get_arch(void)
104{
105#ifdef CONFIG_64BIT
106 return AUDIT_ARCH_RISCV64;
107#else
108 return AUDIT_ARCH_RISCV32;
109#endif
110}
111
102#endif /* _ASM_RISCV_SYSCALL_H */ 112#endif /* _ASM_RISCV_SYSCALL_H */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index f8fa1cd2dad9..1c9cc8389928 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -80,13 +80,19 @@ struct thread_info {
80#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ 80#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
81#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ 81#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
82#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ 82#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
83#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */
83 84
84#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 85#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
85#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 86#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
86#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 87#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
87#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 88#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
89#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
90#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
88 91
89#define _TIF_WORK_MASK \ 92#define _TIF_WORK_MASK \
90 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED) 93 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
91 94
95#define _TIF_SYSCALL_WORK \
96 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
97
92#endif /* _ASM_RISCV_THREAD_INFO_H */ 98#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index fef96f117b4d..073ee80fdf74 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -19,3 +19,5 @@
19#define __ARCH_WANT_SYS_CLONE 19#define __ARCH_WANT_SYS_CLONE
20 20
21#include <uapi/asm/unistd.h> 21#include <uapi/asm/unistd.h>
22
23#define NR_syscalls (__NR_syscalls)
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 6a92a2fe198e..dac98348c6a3 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ void asm_offsets(void)
39 OFFSET(TASK_STACK, task_struct, stack); 39 OFFSET(TASK_STACK, task_struct, stack);
40 OFFSET(TASK_TI, task_struct, thread_info); 40 OFFSET(TASK_TI, task_struct, thread_info);
41 OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); 41 OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
42 OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
42 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); 43 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
43 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); 44 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
44 OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); 45 OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 13d4826ab2a1..fd9b57c8b4ce 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -144,6 +144,10 @@ _save_context:
144 REG_L x2, PT_SP(sp) 144 REG_L x2, PT_SP(sp)
145 .endm 145 .endm
146 146
147#if !IS_ENABLED(CONFIG_PREEMPT)
148.set resume_kernel, restore_all
149#endif
150
147ENTRY(handle_exception) 151ENTRY(handle_exception)
148 SAVE_ALL 152 SAVE_ALL
149 153
@@ -201,7 +205,7 @@ handle_syscall:
201 REG_S s2, PT_SEPC(sp) 205 REG_S s2, PT_SEPC(sp)
202 /* Trace syscalls, but only if requested by the user. */ 206 /* Trace syscalls, but only if requested by the user. */
203 REG_L t0, TASK_TI_FLAGS(tp) 207 REG_L t0, TASK_TI_FLAGS(tp)
204 andi t0, t0, _TIF_SYSCALL_TRACE 208 andi t0, t0, _TIF_SYSCALL_WORK
205 bnez t0, handle_syscall_trace_enter 209 bnez t0, handle_syscall_trace_enter
206check_syscall_nr: 210check_syscall_nr:
207 /* Check to make sure we don't jump to a bogus syscall number. */ 211 /* Check to make sure we don't jump to a bogus syscall number. */
@@ -221,14 +225,14 @@ ret_from_syscall:
221 REG_S a0, PT_A0(sp) 225 REG_S a0, PT_A0(sp)
222 /* Trace syscalls, but only if requested by the user. */ 226 /* Trace syscalls, but only if requested by the user. */
223 REG_L t0, TASK_TI_FLAGS(tp) 227 REG_L t0, TASK_TI_FLAGS(tp)
224 andi t0, t0, _TIF_SYSCALL_TRACE 228 andi t0, t0, _TIF_SYSCALL_WORK
225 bnez t0, handle_syscall_trace_exit 229 bnez t0, handle_syscall_trace_exit
226 230
227ret_from_exception: 231ret_from_exception:
228 REG_L s0, PT_SSTATUS(sp) 232 REG_L s0, PT_SSTATUS(sp)
229 csrc sstatus, SR_SIE 233 csrc sstatus, SR_SIE
230 andi s0, s0, SR_SPP 234 andi s0, s0, SR_SPP
231 bnez s0, restore_all 235 bnez s0, resume_kernel
232 236
233resume_userspace: 237resume_userspace:
234 /* Interrupts must be disabled here so flags are checked atomically */ 238 /* Interrupts must be disabled here so flags are checked atomically */
@@ -250,6 +254,18 @@ restore_all:
250 RESTORE_ALL 254 RESTORE_ALL
251 sret 255 sret
252 256
257#if IS_ENABLED(CONFIG_PREEMPT)
258resume_kernel:
259 REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
260 bnez s0, restore_all
261need_resched:
262 REG_L s0, TASK_TI_FLAGS(tp)
263 andi s0, s0, _TIF_NEED_RESCHED
264 beqz s0, restore_all
265 call preempt_schedule_irq
266 j need_resched
267#endif
268
253work_pending: 269work_pending:
254 /* Enter slow path for supplementary processing */ 270 /* Enter slow path for supplementary processing */
255 la ra, ret_from_exception 271 la ra, ret_from_exception
diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c
index bbbd26e19bfd..c9ae48333114 100644
--- a/arch/riscv/kernel/module-sections.c
+++ b/arch/riscv/kernel/module-sections.c
@@ -9,14 +9,14 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h> 10#include <linux/module.h>
11 11
12u64 module_emit_got_entry(struct module *mod, u64 val) 12unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
13{ 13{
14 struct mod_section *got_sec = &mod->arch.got; 14 struct mod_section *got_sec = &mod->arch.got;
15 int i = got_sec->num_entries; 15 int i = got_sec->num_entries;
16 struct got_entry *got = get_got_entry(val, got_sec); 16 struct got_entry *got = get_got_entry(val, got_sec);
17 17
18 if (got) 18 if (got)
19 return (u64)got; 19 return (unsigned long)got;
20 20
21 /* There is no duplicate entry, create a new one */ 21 /* There is no duplicate entry, create a new one */
22 got = (struct got_entry *)got_sec->shdr->sh_addr; 22 got = (struct got_entry *)got_sec->shdr->sh_addr;
@@ -25,10 +25,10 @@ u64 module_emit_got_entry(struct module *mod, u64 val)
25 got_sec->num_entries++; 25 got_sec->num_entries++;
26 BUG_ON(got_sec->num_entries > got_sec->max_entries); 26 BUG_ON(got_sec->num_entries > got_sec->max_entries);
27 27
28 return (u64)&got[i]; 28 return (unsigned long)&got[i];
29} 29}
30 30
31u64 module_emit_plt_entry(struct module *mod, u64 val) 31unsigned long module_emit_plt_entry(struct module *mod, unsigned long val)
32{ 32{
33 struct mod_section *got_plt_sec = &mod->arch.got_plt; 33 struct mod_section *got_plt_sec = &mod->arch.got_plt;
34 struct got_entry *got_plt; 34 struct got_entry *got_plt;
@@ -37,27 +37,29 @@ u64 module_emit_plt_entry(struct module *mod, u64 val)
37 int i = plt_sec->num_entries; 37 int i = plt_sec->num_entries;
38 38
39 if (plt) 39 if (plt)
40 return (u64)plt; 40 return (unsigned long)plt;
41 41
42 /* There is no duplicate entry, create a new one */ 42 /* There is no duplicate entry, create a new one */
43 got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr; 43 got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr;
44 got_plt[i] = emit_got_entry(val); 44 got_plt[i] = emit_got_entry(val);
45 plt = (struct plt_entry *)plt_sec->shdr->sh_addr; 45 plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
46 plt[i] = emit_plt_entry(val, (u64)&plt[i], (u64)&got_plt[i]); 46 plt[i] = emit_plt_entry(val,
47 (unsigned long)&plt[i],
48 (unsigned long)&got_plt[i]);
47 49
48 plt_sec->num_entries++; 50 plt_sec->num_entries++;
49 got_plt_sec->num_entries++; 51 got_plt_sec->num_entries++;
50 BUG_ON(plt_sec->num_entries > plt_sec->max_entries); 52 BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
51 53
52 return (u64)&plt[i]; 54 return (unsigned long)&plt[i];
53} 55}
54 56
55static int is_rela_equal(const Elf64_Rela *x, const Elf64_Rela *y) 57static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
56{ 58{
57 return x->r_info == y->r_info && x->r_addend == y->r_addend; 59 return x->r_info == y->r_info && x->r_addend == y->r_addend;
58} 60}
59 61
60static bool duplicate_rela(const Elf64_Rela *rela, int idx) 62static bool duplicate_rela(const Elf_Rela *rela, int idx)
61{ 63{
62 int i; 64 int i;
63 for (i = 0; i < idx; i++) { 65 for (i = 0; i < idx; i++) {
@@ -67,13 +69,13 @@ static bool duplicate_rela(const Elf64_Rela *rela, int idx)
67 return false; 69 return false;
68} 70}
69 71
70static void count_max_entries(Elf64_Rela *relas, int num, 72static void count_max_entries(Elf_Rela *relas, int num,
71 unsigned int *plts, unsigned int *gots) 73 unsigned int *plts, unsigned int *gots)
72{ 74{
73 unsigned int type, i; 75 unsigned int type, i;
74 76
75 for (i = 0; i < num; i++) { 77 for (i = 0; i < num; i++) {
76 type = ELF64_R_TYPE(relas[i].r_info); 78 type = ELF_RISCV_R_TYPE(relas[i].r_info);
77 if (type == R_RISCV_CALL_PLT) { 79 if (type == R_RISCV_CALL_PLT) {
78 if (!duplicate_rela(relas, i)) 80 if (!duplicate_rela(relas, i))
79 (*plts)++; 81 (*plts)++;
@@ -118,9 +120,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
118 120
119 /* Calculate the maxinum number of entries */ 121 /* Calculate the maxinum number of entries */
120 for (i = 0; i < ehdr->e_shnum; i++) { 122 for (i = 0; i < ehdr->e_shnum; i++) {
121 Elf64_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset; 123 Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
122 int num_rela = sechdrs[i].sh_size / sizeof(Elf64_Rela); 124 int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
123 Elf64_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info; 125 Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
124 126
125 if (sechdrs[i].sh_type != SHT_RELA) 127 if (sechdrs[i].sh_type != SHT_RELA)
126 continue; 128 continue;
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 60f1e02eed36..2ae5e0284f56 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -18,12 +18,15 @@
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19#include <asm/syscall.h> 19#include <asm/syscall.h>
20#include <asm/thread_info.h> 20#include <asm/thread_info.h>
21#include <linux/audit.h>
21#include <linux/ptrace.h> 22#include <linux/ptrace.h>
22#include <linux/elf.h> 23#include <linux/elf.h>
23#include <linux/regset.h> 24#include <linux/regset.h>
24#include <linux/sched.h> 25#include <linux/sched.h>
25#include <linux/sched/task_stack.h> 26#include <linux/sched/task_stack.h>
26#include <linux/tracehook.h> 27#include <linux/tracehook.h>
28
29#define CREATE_TRACE_POINTS
27#include <trace/events/syscalls.h> 30#include <trace/events/syscalls.h>
28 31
29enum riscv_regset { 32enum riscv_regset {
@@ -163,15 +166,19 @@ void do_syscall_trace_enter(struct pt_regs *regs)
163 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 166 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
164 trace_sys_enter(regs, syscall_get_nr(current, regs)); 167 trace_sys_enter(regs, syscall_get_nr(current, regs));
165#endif 168#endif
169
170 audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
166} 171}
167 172
168void do_syscall_trace_exit(struct pt_regs *regs) 173void do_syscall_trace_exit(struct pt_regs *regs)
169{ 174{
175 audit_syscall_exit(regs);
176
170 if (test_thread_flag(TIF_SYSCALL_TRACE)) 177 if (test_thread_flag(TIF_SYSCALL_TRACE))
171 tracehook_report_syscall_exit(regs, 0); 178 tracehook_report_syscall_exit(regs, 0);
172 179
173#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS 180#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
174 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 181 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
175 trace_sys_exit(regs, regs->regs[0]); 182 trace_sys_exit(regs, regs_return_value(regs));
176#endif 183#endif
177} 184}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index fc8006a042eb..77564310235f 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -149,7 +149,14 @@ asmlinkage void __init setup_vm(void)
149 149
150void __init parse_dtb(unsigned int hartid, void *dtb) 150void __init parse_dtb(unsigned int hartid, void *dtb)
151{ 151{
152 early_init_dt_scan(__va(dtb)); 152 if (!early_init_dt_scan(__va(dtb)))
153 return;
154
155 pr_err("No DTB passed to the kernel\n");
156#ifdef CONFIG_CMDLINE_FORCE
157 strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
158 pr_info("Forcing kernel command line to: %s\n", boot_command_line);
159#endif
153} 160}
154 161
155static void __init setup_bootmem(void) 162static void __init setup_bootmem(void)
@@ -174,7 +181,7 @@ static void __init setup_bootmem(void)
174 BUG_ON(mem_size == 0); 181 BUG_ON(mem_size == 0);
175 182
176 set_max_mapnr(PFN_DOWN(mem_size)); 183 set_max_mapnr(PFN_DOWN(mem_size));
177 max_low_pfn = memblock_end_of_DRAM(); 184 max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
178 185
179#ifdef CONFIG_BLK_DEV_INITRD 186#ifdef CONFIG_BLK_DEV_INITRD
180 setup_initrd(); 187 setup_initrd();
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 57b1383e5ef7..246635eac7bb 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -23,6 +23,7 @@
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26#include <linux/delay.h>
26 27
27#include <asm/sbi.h> 28#include <asm/sbi.h>
28#include <asm/tlbflush.h> 29#include <asm/tlbflush.h>
@@ -31,6 +32,7 @@
31enum ipi_message_type { 32enum ipi_message_type {
32 IPI_RESCHEDULE, 33 IPI_RESCHEDULE,
33 IPI_CALL_FUNC, 34 IPI_CALL_FUNC,
35 IPI_CPU_STOP,
34 IPI_MAX 36 IPI_MAX
35}; 37};
36 38
@@ -66,6 +68,13 @@ int setup_profiling_timer(unsigned int multiplier)
66 return -EINVAL; 68 return -EINVAL;
67} 69}
68 70
71static void ipi_stop(void)
72{
73 set_cpu_online(smp_processor_id(), false);
74 while (1)
75 wait_for_interrupt();
76}
77
69void riscv_software_interrupt(void) 78void riscv_software_interrupt(void)
70{ 79{
71 unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits; 80 unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
@@ -94,6 +103,11 @@ void riscv_software_interrupt(void)
94 generic_smp_call_function_interrupt(); 103 generic_smp_call_function_interrupt();
95 } 104 }
96 105
106 if (ops & (1 << IPI_CPU_STOP)) {
107 stats[IPI_CPU_STOP]++;
108 ipi_stop();
109 }
110
97 BUG_ON((ops >> IPI_MAX) != 0); 111 BUG_ON((ops >> IPI_MAX) != 0);
98 112
99 /* Order data access and bit testing. */ 113 /* Order data access and bit testing. */
@@ -121,6 +135,7 @@ send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
121static const char * const ipi_names[] = { 135static const char * const ipi_names[] = {
122 [IPI_RESCHEDULE] = "Rescheduling interrupts", 136 [IPI_RESCHEDULE] = "Rescheduling interrupts",
123 [IPI_CALL_FUNC] = "Function call interrupts", 137 [IPI_CALL_FUNC] = "Function call interrupts",
138 [IPI_CPU_STOP] = "CPU stop interrupts",
124}; 139};
125 140
126void show_ipi_stats(struct seq_file *p, int prec) 141void show_ipi_stats(struct seq_file *p, int prec)
@@ -146,15 +161,29 @@ void arch_send_call_function_single_ipi(int cpu)
146 send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); 161 send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
147} 162}
148 163
149static void ipi_stop(void *unused)
150{
151 while (1)
152 wait_for_interrupt();
153}
154
155void smp_send_stop(void) 164void smp_send_stop(void)
156{ 165{
157 on_each_cpu(ipi_stop, NULL, 1); 166 unsigned long timeout;
167
168 if (num_online_cpus() > 1) {
169 cpumask_t mask;
170
171 cpumask_copy(&mask, cpu_online_mask);
172 cpumask_clear_cpu(smp_processor_id(), &mask);
173
174 if (system_state <= SYSTEM_RUNNING)
175 pr_crit("SMP: stopping secondary CPUs\n");
176 send_ipi_message(&mask, IPI_CPU_STOP);
177 }
178
179 /* Wait up to one second for other CPUs to stop */
180 timeout = USEC_PER_SEC;
181 while (num_online_cpus() > 1 && timeout--)
182 udelay(1);
183
184 if (num_online_cpus() > 1)
185 pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
186 cpumask_pr_args(cpu_online_mask));
158} 187}
159 188
160void smp_send_reschedule(int cpu) 189void smp_send_reschedule(int cpu)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index fc185ecabb0a..18cda0e8cf94 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -57,15 +57,12 @@ void __init setup_smp(void)
57 57
58 while ((dn = of_find_node_by_type(dn, "cpu"))) { 58 while ((dn = of_find_node_by_type(dn, "cpu"))) {
59 hart = riscv_of_processor_hartid(dn); 59 hart = riscv_of_processor_hartid(dn);
60 if (hart < 0) { 60 if (hart < 0)
61 of_node_put(dn);
62 continue; 61 continue;
63 }
64 62
65 if (hart == cpuid_to_hartid_map(0)) { 63 if (hart == cpuid_to_hartid_map(0)) {
66 BUG_ON(found_boot_cpu); 64 BUG_ON(found_boot_cpu);
67 found_boot_cpu = 1; 65 found_boot_cpu = 1;
68 of_node_put(dn);
69 continue; 66 continue;
70 } 67 }
71 68
@@ -73,7 +70,6 @@ void __init setup_smp(void)
73 set_cpu_possible(cpuid, true); 70 set_cpu_possible(cpuid, true);
74 set_cpu_present(cpuid, true); 71 set_cpu_present(cpuid, true);
75 cpuid++; 72 cpuid++;
76 of_node_put(dn);
77 } 73 }
78 74
79 BUG_ON(!found_boot_cpu); 75 BUG_ON(!found_boot_cpu);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 1d9bfaff60bc..658ebf645f42 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -28,7 +28,8 @@ static void __init zone_sizes_init(void)
28 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; 28 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
29 29
30#ifdef CONFIG_ZONE_DMA32 30#ifdef CONFIG_ZONE_DMA32
31 max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); 31 max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
32 (unsigned long) PFN_PHYS(max_low_pfn)));
32#endif 33#endif
33 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 34 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
34 35
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index ccbb53e22024..8d04e6f3f796 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
25 atomic_set(&mm->context.flush_count, 0); 25 atomic_set(&mm->context.flush_count, 0);
26 mm->context.gmap_asce = 0; 26 mm->context.gmap_asce = 0;
27 mm->context.flush_mm = 0; 27 mm->context.flush_mm = 0;
28 mm->context.compat_mm = 0; 28 mm->context.compat_mm = test_thread_flag(TIF_31BIT);
29#ifdef CONFIG_PGSTE 29#ifdef CONFIG_PGSTE
30 mm->context.alloc_pgste = page_table_allocate_pgste || 30 mm->context.alloc_pgste = page_table_allocate_pgste ||
31 test_thread_flag(TIF_PGSTE) || 31 test_thread_flag(TIF_PGSTE) ||
@@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
90{ 90{
91 int cpu = smp_processor_id(); 91 int cpu = smp_processor_id();
92 92
93 if (prev == next)
94 return;
95 S390_lowcore.user_asce = next->context.asce; 93 S390_lowcore.user_asce = next->context.asce;
96 cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); 94 cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
97 /* Clear previous user-ASCE from CR1 and CR7 */ 95 /* Clear previous user-ASCE from CR1 and CR7 */
@@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
103 __ctl_load(S390_lowcore.vdso_asce, 7, 7); 101 __ctl_load(S390_lowcore.vdso_asce, 7, 7);
104 clear_cpu_flag(CIF_ASCE_SECONDARY); 102 clear_cpu_flag(CIF_ASCE_SECONDARY);
105 } 103 }
106 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); 104 if (prev != next)
105 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
107} 106}
108 107
109#define finish_arch_post_lock_switch finish_arch_post_lock_switch 108#define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index af5c2b3f7065..a8c7789b246b 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
63 if (stsi(vmms, 3, 2, 2) || !vmms->count) 63 if (stsi(vmms, 3, 2, 2) || !vmms->count)
64 return; 64 return;
65 65
66 /* Running under KVM? If not we assume z/VM */ 66 /* Detect known hypervisors */
67 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) 67 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
68 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; 68 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
69 else 69 else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
70 S390_lowcore.machine_flags |= MACHINE_FLAG_VM; 70 S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
71} 71}
72 72
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 72dd23ef771b..7ed90a759135 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p)
1006 pr_info("Linux is running under KVM in 64-bit mode\n"); 1006 pr_info("Linux is running under KVM in 64-bit mode\n");
1007 else if (MACHINE_IS_LPAR) 1007 else if (MACHINE_IS_LPAR)
1008 pr_info("Linux is running natively in 64-bit mode\n"); 1008 pr_info("Linux is running natively in 64-bit mode\n");
1009 else
1010 pr_info("Linux is running as a guest in 64-bit mode\n");
1009 1011
1010 /* Have one command line that is parsed and saved in /proc/cmdline */ 1012 /* Have one command line that is parsed and saved in /proc/cmdline */
1011 /* boot_command_line has been already set up in early.c */ 1013 /* boot_command_line has been already set up in early.c */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f82b3d3c36e2..b198ece2aad6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
381 */ 381 */
382void smp_call_ipl_cpu(void (*func)(void *), void *data) 382void smp_call_ipl_cpu(void (*func)(void *), void *data)
383{ 383{
384 struct lowcore *lc = pcpu_devices->lowcore;
385
386 if (pcpu_devices[0].address == stap())
387 lc = &S390_lowcore;
388
384 pcpu_delegate(&pcpu_devices[0], func, data, 389 pcpu_delegate(&pcpu_devices[0], func, data,
385 pcpu_devices->lowcore->nodat_stack); 390 lc->nodat_stack);
386} 391}
387 392
388int smp_find_processor_id(u16 address) 393int smp_find_processor_id(u16 address)
@@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev,
1166{ 1171{
1167 int rc; 1172 int rc;
1168 1173
1174 rc = lock_device_hotplug_sysfs();
1175 if (rc)
1176 return rc;
1169 rc = smp_rescan_cpus(); 1177 rc = smp_rescan_cpus();
1178 unlock_device_hotplug();
1170 return rc ? rc : count; 1179 return rc ? rc : count;
1171} 1180}
1172static DEVICE_ATTR_WO(rescan); 1181static DEVICE_ATTR_WO(rescan);
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 537f97fde37f..b6796e616812 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -30,10 +30,10 @@
30 .section .text 30 .section .text
31ENTRY(swsusp_arch_suspend) 31ENTRY(swsusp_arch_suspend)
32 lg %r1,__LC_NODAT_STACK 32 lg %r1,__LC_NODAT_STACK
33 aghi %r1,-STACK_FRAME_OVERHEAD
34 stmg %r6,%r15,__SF_GPRS(%r1) 33 stmg %r6,%r15,__SF_GPRS(%r1)
34 aghi %r1,-STACK_FRAME_OVERHEAD
35 stg %r15,__SF_BACKCHAIN(%r1) 35 stg %r15,__SF_BACKCHAIN(%r1)
36 lgr %r1,%r15 36 lgr %r15,%r1
37 37
38 /* Store FPU registers */ 38 /* Store FPU registers */
39 brasl %r14,save_fpu_regs 39 brasl %r14,save_fpu_regs
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index ebe748a9f472..4ff354887db4 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
224 224
225 vdso_pages = vdso64_pages; 225 vdso_pages = vdso64_pages;
226#ifdef CONFIG_COMPAT 226#ifdef CONFIG_COMPAT
227 if (is_compat_task()) { 227 mm->context.compat_mm = is_compat_task();
228 if (mm->context.compat_mm)
228 vdso_pages = vdso32_pages; 229 vdso_pages = vdso32_pages;
229 mm->context.compat_mm = 1;
230 }
231#endif 230#endif
232 /* 231 /*
233 * vDSO has a problem and was disabled, just don't "enable" it for 232 * vDSO has a problem and was disabled, just don't "enable" it for
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a153257bf7d9..d62fa148558b 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
297 scb_s->crycbd = 0; 297 scb_s->crycbd = 0;
298 298
299 apie_h = vcpu->arch.sie_block->eca & ECA_APIE; 299 apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
300 if (!apie_h && !key_msk) 300 if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
301 return 0; 301 return 0;
302 302
303 if (!crycb_addr) 303 if (!crycb_addr)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index a966d7bfac57..4266a4de3160 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq)
382 if (ai == -1UL) 382 if (ai == -1UL)
383 break; 383 break;
384 inc_irq_stat(IRQIO_MSI); 384 inc_irq_stat(IRQIO_MSI);
385 airq_iv_lock(aibv, ai);
385 generic_handle_irq(airq_iv_get_data(aibv, ai)); 386 generic_handle_irq(airq_iv_get_data(aibv, ai));
387 airq_iv_unlock(aibv, ai);
386 } 388 }
387 } 389 }
388} 390}
@@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
408 zdev->aisb = aisb; 410 zdev->aisb = aisb;
409 411
410 /* Create adapter interrupt vector */ 412 /* Create adapter interrupt vector */
411 zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA); 413 zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
412 if (!zdev->aibv) 414 if (!zdev->aibv)
413 return -ENOMEM; 415 return -ENOMEM;
414 416
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile
index 01d0f7fb14cc..2563d1e532e2 100644
--- a/arch/sh/boot/dts/Makefile
+++ b/arch/sh/boot/dts/Makefile
@@ -1,3 +1,3 @@
1ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") 1ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
2obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o 2obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
3endif 3endif
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1372553dc0a9..1d1544b6ca74 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += preempt.h
28generic-y += sections.h 28generic-y += sections.h
29generic-y += segment.h 29generic-y += segment.h
30generic-y += serial.h 30generic-y += serial.h
31generic-y += shmparam.h
31generic-y += sizes.h 32generic-y += sizes.h
32generic-y += syscalls.h 33generic-y += syscalls.h
33generic-y += topology.h 34generic-y += topology.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6185d4f33296..68261430fe6e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -198,7 +198,7 @@ config X86
198 select IRQ_FORCED_THREADING 198 select IRQ_FORCED_THREADING
199 select NEED_SG_DMA_LENGTH 199 select NEED_SG_DMA_LENGTH
200 select PCI_DOMAINS if PCI 200 select PCI_DOMAINS if PCI
201 select PCI_LOCKLESS_CONFIG 201 select PCI_LOCKLESS_CONFIG if PCI
202 select PERF_EVENTS 202 select PERF_EVENTS
203 select RTC_LIB 203 select RTC_LIB
204 select RTC_MC146818_LIB 204 select RTC_MC146818_LIB
@@ -446,12 +446,12 @@ config RETPOLINE
446 branches. Requires a compiler with -mindirect-branch=thunk-extern 446 branches. Requires a compiler with -mindirect-branch=thunk-extern
447 support for full protection. The kernel may run slower. 447 support for full protection. The kernel may run slower.
448 448
449config RESCTRL 449config X86_CPU_RESCTRL
450 bool "Resource Control support" 450 bool "x86 CPU resource control support"
451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) 451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
452 select KERNFS 452 select KERNFS
453 help 453 help
454 Enable Resource Control support. 454 Enable x86 CPU resource control support.
455 455
456 Provide support for the allocation and monitoring of system resources 456 Provide support for the allocation and monitoring of system resources
457 usage by the CPU. 457 usage by the CPU.
@@ -617,7 +617,7 @@ config X86_INTEL_QUARK
617 617
618config X86_INTEL_LPSS 618config X86_INTEL_LPSS
619 bool "Intel Low Power Subsystem Support" 619 bool "Intel Low Power Subsystem Support"
620 depends on X86 && ACPI 620 depends on X86 && ACPI && PCI
621 select COMMON_CLK 621 select COMMON_CLK
622 select PINCTRL 622 select PINCTRL
623 select IOSF_MBI 623 select IOSF_MBI
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 64037895b085..f62e347862cc 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -600,6 +600,16 @@ ENTRY(trampoline_32bit_src)
600 leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax 600 leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
601 movl %eax, %cr3 601 movl %eax, %cr3
6023: 6023:
603 /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
604 pushl %ecx
605 pushl %edx
606 movl $MSR_EFER, %ecx
607 rdmsr
608 btsl $_EFER_LME, %eax
609 wrmsr
610 popl %edx
611 popl %ecx
612
603 /* Enable PAE and LA57 (if required) paging modes */ 613 /* Enable PAE and LA57 (if required) paging modes */
604 movl $X86_CR4_PAE, %eax 614 movl $X86_CR4_PAE, %eax
605 cmpl $0, %edx 615 cmpl $0, %edx
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 91f75638f6e6..6ff7e81b5628 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -6,7 +6,7 @@
6#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 6#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
7 7
8#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE 8#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
9#define TRAMPOLINE_32BIT_CODE_SIZE 0x60 9#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
10 10
11#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE 11#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
12 12
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 8eaf8952c408..39913770a44d 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)
361 361
362 /* Need to switch before accessing the thread stack. */ 362 /* Need to switch before accessing the thread stack. */
363 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi 363 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
364 movq %rsp, %rdi 364 /* In the Xen PV case we already run on the thread stack. */
365 ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
365 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 366 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
366 367
367 pushq 6*8(%rdi) /* regs->ss */ 368 pushq 6*8(%rdi) /* regs->ss */
@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
370 pushq 3*8(%rdi) /* regs->cs */ 371 pushq 3*8(%rdi) /* regs->cs */
371 pushq 2*8(%rdi) /* regs->ip */ 372 pushq 2*8(%rdi) /* regs->ip */
372 pushq 1*8(%rdi) /* regs->orig_ax */ 373 pushq 1*8(%rdi) /* regs->orig_ax */
373
374 pushq (%rdi) /* pt_regs->di */ 374 pushq (%rdi) /* pt_regs->di */
375.Lint80_keep_stack:
376
375 pushq %rsi /* pt_regs->si */ 377 pushq %rsi /* pt_regs->si */
376 xorl %esi, %esi /* nospec si */ 378 xorl %esi, %esi /* nospec si */
377 pushq %rdx /* pt_regs->dx */ 379 pushq %rdx /* pt_regs->dx */
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 374a19712e20..b684f0294f35 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
2278 x86_pmu.check_microcode(); 2278 x86_pmu.check_microcode();
2279} 2279}
2280 2280
2281static int x86_pmu_check_period(struct perf_event *event, u64 value)
2282{
2283 if (x86_pmu.check_period && x86_pmu.check_period(event, value))
2284 return -EINVAL;
2285
2286 if (value && x86_pmu.limit_period) {
2287 if (x86_pmu.limit_period(event, value) > value)
2288 return -EINVAL;
2289 }
2290
2291 return 0;
2292}
2293
2281static struct pmu pmu = { 2294static struct pmu pmu = {
2282 .pmu_enable = x86_pmu_enable, 2295 .pmu_enable = x86_pmu_enable,
2283 .pmu_disable = x86_pmu_disable, 2296 .pmu_disable = x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
2302 .event_idx = x86_pmu_event_idx, 2315 .event_idx = x86_pmu_event_idx,
2303 .sched_task = x86_pmu_sched_task, 2316 .sched_task = x86_pmu_sched_task,
2304 .task_ctx_size = sizeof(struct x86_perf_task_context), 2317 .task_ctx_size = sizeof(struct x86_perf_task_context),
2318 .check_period = x86_pmu_check_period,
2305}; 2319};
2306 2320
2307void arch_perf_update_userpage(struct perf_event *event, 2321void arch_perf_update_userpage(struct perf_event *event,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 40e12cfc87f6..730978dff63f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3559,6 +3559,14 @@ static void free_excl_cntrs(int cpu)
3559 3559
3560static void intel_pmu_cpu_dying(int cpu) 3560static void intel_pmu_cpu_dying(int cpu)
3561{ 3561{
3562 fini_debug_store_on_cpu(cpu);
3563
3564 if (x86_pmu.counter_freezing)
3565 disable_counter_freeze();
3566}
3567
3568static void intel_pmu_cpu_dead(int cpu)
3569{
3562 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 3570 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3563 struct intel_shared_regs *pc; 3571 struct intel_shared_regs *pc;
3564 3572
@@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu)
3570 } 3578 }
3571 3579
3572 free_excl_cntrs(cpu); 3580 free_excl_cntrs(cpu);
3573
3574 fini_debug_store_on_cpu(cpu);
3575
3576 if (x86_pmu.counter_freezing)
3577 disable_counter_freeze();
3578} 3581}
3579 3582
3580static void intel_pmu_sched_task(struct perf_event_context *ctx, 3583static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3584,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
3584 intel_pmu_lbr_sched_task(ctx, sched_in); 3587 intel_pmu_lbr_sched_task(ctx, sched_in);
3585} 3588}
3586 3589
3590static int intel_pmu_check_period(struct perf_event *event, u64 value)
3591{
3592 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3593}
3594
3587PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 3595PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3588 3596
3589PMU_FORMAT_ATTR(ldlat, "config1:0-15"); 3597PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3663,6 +3671,9 @@ static __initconst const struct x86_pmu core_pmu = {
3663 .cpu_prepare = intel_pmu_cpu_prepare, 3671 .cpu_prepare = intel_pmu_cpu_prepare,
3664 .cpu_starting = intel_pmu_cpu_starting, 3672 .cpu_starting = intel_pmu_cpu_starting,
3665 .cpu_dying = intel_pmu_cpu_dying, 3673 .cpu_dying = intel_pmu_cpu_dying,
3674 .cpu_dead = intel_pmu_cpu_dead,
3675
3676 .check_period = intel_pmu_check_period,
3666}; 3677};
3667 3678
3668static struct attribute *intel_pmu_attrs[]; 3679static struct attribute *intel_pmu_attrs[];
@@ -3703,8 +3714,12 @@ static __initconst const struct x86_pmu intel_pmu = {
3703 .cpu_prepare = intel_pmu_cpu_prepare, 3714 .cpu_prepare = intel_pmu_cpu_prepare,
3704 .cpu_starting = intel_pmu_cpu_starting, 3715 .cpu_starting = intel_pmu_cpu_starting,
3705 .cpu_dying = intel_pmu_cpu_dying, 3716 .cpu_dying = intel_pmu_cpu_dying,
3717 .cpu_dead = intel_pmu_cpu_dead,
3718
3706 .guest_get_msrs = intel_guest_get_msrs, 3719 .guest_get_msrs = intel_guest_get_msrs,
3707 .sched_task = intel_pmu_sched_task, 3720 .sched_task = intel_pmu_sched_task,
3721
3722 .check_period = intel_pmu_check_period,
3708}; 3723};
3709 3724
3710static __init void intel_clovertown_quirk(void) 3725static __init void intel_clovertown_quirk(void)
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index c07bee31abe8..b10e04387f38 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
1222 .id_table = snbep_uncore_pci_ids, 1222 .id_table = snbep_uncore_pci_ids,
1223}; 1223};
1224 1224
1225#define NODE_ID_MASK 0x7
1226
1225/* 1227/*
1226 * build pci bus to socket mapping 1228 * build pci bus to socket mapping
1227 */ 1229 */
@@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
1243 err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); 1245 err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1244 if (err) 1246 if (err)
1245 break; 1247 break;
1246 nodeid = config; 1248 nodeid = config & NODE_ID_MASK;
1247 /* get the Node ID mapping */ 1249 /* get the Node ID mapping */
1248 err = pci_read_config_dword(ubox_dev, idmap_loc, &config); 1250 err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1249 if (err) 1251 if (err)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 78d7b7031bfc..d46fd6754d92 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -646,6 +646,11 @@ struct x86_pmu {
646 * Intel host/guest support (KVM) 646 * Intel host/guest support (KVM)
647 */ 647 */
648 struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); 648 struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
649
650 /*
651 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
652 */
653 int (*check_period) (struct perf_event *event, u64 period);
649}; 654};
650 655
651struct x86_perf_task_context { 656struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
857 862
858#ifdef CONFIG_CPU_SUP_INTEL 863#ifdef CONFIG_CPU_SUP_INTEL
859 864
860static inline bool intel_pmu_has_bts(struct perf_event *event) 865static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
861{ 866{
862 struct hw_perf_event *hwc = &event->hw; 867 struct hw_perf_event *hwc = &event->hw;
863 unsigned int hw_event, bts_event; 868 unsigned int hw_event, bts_event;
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
868 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; 873 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
869 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 874 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
870 875
871 return hw_event == bts_event && hwc->sample_period == 1; 876 return hw_event == bts_event && period == 1;
877}
878
879static inline bool intel_pmu_has_bts(struct perf_event *event)
880{
881 struct hw_perf_event *hwc = &event->hw;
882
883 return intel_pmu_has_bts_period(event, hwc->sample_period);
872} 884}
873 885
874int intel_pmu_save_and_restart(struct perf_event *event); 886int intel_pmu_save_and_restart(struct perf_event *event);
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index f65b78d32f5e..7dbbe9ffda17 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
51/* 51/*
52 * fill in the user structure for a core dump.. 52 * fill in the user structure for a core dump..
53 */ 53 */
54static void dump_thread32(struct pt_regs *regs, struct user32 *dump) 54static void fill_dump(struct pt_regs *regs, struct user32 *dump)
55{ 55{
56 u32 fs, gs; 56 u32 fs, gs;
57 memset(dump, 0, sizeof(*dump)); 57 memset(dump, 0, sizeof(*dump));
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
157 fs = get_fs(); 157 fs = get_fs();
158 set_fs(KERNEL_DS); 158 set_fs(KERNEL_DS);
159 has_dumped = 1; 159 has_dumped = 1;
160
161 fill_dump(cprm->regs, &dump);
162
160 strncpy(dump.u_comm, current->comm, sizeof(current->comm)); 163 strncpy(dump.u_comm, current->comm, sizeof(current->comm));
161 dump.u_ar0 = offsetof(struct user32, regs); 164 dump.u_ar0 = offsetof(struct user32, regs);
162 dump.signal = cprm->siginfo->si_signo; 165 dump.signal = cprm->siginfo->si_signo;
163 dump_thread32(cprm->regs, &dump);
164 166
165 /* 167 /*
166 * If the size of the dump file exceeds the rlimit, then see 168 * If the size of the dump file exceeds the rlimit, then see
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0dd6b0f4000e..9f15384c504a 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -6,7 +6,7 @@
6 * "Big Core" Processors (Branded as Core, Xeon, etc...) 6 * "Big Core" Processors (Branded as Core, Xeon, etc...)
7 * 7 *
8 * The "_X" parts are generally the EP and EX Xeons, or the 8 * The "_X" parts are generally the EP and EX Xeons, or the
9 * "Extreme" ones, like Broadwell-E. 9 * "Extreme" ones, like Broadwell-E, or Atom microserver.
10 * 10 *
11 * While adding a new CPUID for a new microarchitecture, add a new 11 * While adding a new CPUID for a new microarchitecture, add a new
12 * group to keep logically sorted out in chronological order. Within 12 * group to keep logically sorted out in chronological order. Within
@@ -52,6 +52,8 @@
52 52
53#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 53#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
54 54
55#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
56
55/* "Small Core" Processors (Atom) */ 57/* "Small Core" Processors (Atom) */
56 58
57#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ 59#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
@@ -71,6 +73,7 @@
71#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ 73#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
72#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ 74#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
73#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ 75#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
76#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
74 77
75/* Xeon Phi */ 78/* Xeon Phi */
76 79
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4660ce90de7f..180373360e34 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
299 unsigned int cr4_smap:1; 299 unsigned int cr4_smap:1;
300 unsigned int cr4_smep:1; 300 unsigned int cr4_smep:1;
301 unsigned int cr4_la57:1; 301 unsigned int cr4_la57:1;
302 unsigned int maxphyaddr:6;
302 }; 303 };
303}; 304};
304 305
@@ -397,6 +398,7 @@ struct kvm_mmu {
397 void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 398 void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
398 u64 *spte, const void *pte); 399 u64 *spte, const void *pte);
399 hpa_t root_hpa; 400 hpa_t root_hpa;
401 gpa_t root_cr3;
400 union kvm_mmu_role mmu_role; 402 union kvm_mmu_role mmu_role;
401 u8 root_level; 403 u8 root_level;
402 u8 shadow_root_level; 404 u8 shadow_root_level;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 0ca50611e8ce..19d18fae6ec6 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
178 178
179void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); 179void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
180 180
181/*
182 * Init a new mm. Used on mm copies, like at fork()
183 * and on mm's that are brand-new, like at execve().
184 */
181static inline int init_new_context(struct task_struct *tsk, 185static inline int init_new_context(struct task_struct *tsk,
182 struct mm_struct *mm) 186 struct mm_struct *mm)
183{ 187{
@@ -228,8 +232,22 @@ do { \
228} while (0) 232} while (0)
229#endif 233#endif
230 234
235static inline void arch_dup_pkeys(struct mm_struct *oldmm,
236 struct mm_struct *mm)
237{
238#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
239 if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
240 return;
241
242 /* Duplicate the oldmm pkey state in mm: */
243 mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
244 mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
245#endif
246}
247
231static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) 248static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
232{ 249{
250 arch_dup_pkeys(oldmm, mm);
233 paravirt_arch_dup_mmap(oldmm, mm); 251 paravirt_arch_dup_mmap(oldmm, mm);
234 return ldt_dup_context(oldmm, mm); 252 return ldt_dup_context(oldmm, mm);
235} 253}
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8f657286d599..0ce558a8150d 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -7,7 +7,11 @@
7#endif 7#endif
8 8
9#ifdef CONFIG_KASAN 9#ifdef CONFIG_KASAN
10#ifdef CONFIG_KASAN_EXTRA
11#define KASAN_STACK_ORDER 2
12#else
10#define KASAN_STACK_ORDER 1 13#define KASAN_STACK_ORDER 1
14#endif
11#else 15#else
12#define KASAN_STACK_ORDER 0 16#define KASAN_STACK_ORDER 0
13#endif 17#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 40616e805292..2779ace16d23 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
1065static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1065static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1066 pmd_t *pmdp, pmd_t pmd) 1066 pmd_t *pmdp, pmd_t pmd)
1067{ 1067{
1068 native_set_pmd(pmdp, pmd); 1068 set_pmd(pmdp, pmd);
1069} 1069}
1070 1070
1071static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, 1071static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h
index 54990fe2a3ae..f6b7fe2833cc 100644
--- a/arch/x86/include/asm/resctrl_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -2,7 +2,7 @@
2#ifndef _ASM_X86_RESCTRL_SCHED_H 2#ifndef _ASM_X86_RESCTRL_SCHED_H
3#define _ASM_X86_RESCTRL_SCHED_H 3#define _ASM_X86_RESCTRL_SCHED_H
4 4
5#ifdef CONFIG_RESCTRL 5#ifdef CONFIG_X86_CPU_RESCTRL
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/jump_label.h> 8#include <linux/jump_label.h>
@@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void)
88 88
89static inline void resctrl_sched_in(void) {} 89static inline void resctrl_sched_in(void) {}
90 90
91#endif /* CONFIG_RESCTRL */ 91#endif /* CONFIG_X86_CPU_RESCTRL */
92 92
93#endif /* _ASM_X86_RESCTRL_SCHED_H */ 93#endif /* _ASM_X86_RESCTRL_SCHED_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a77445d1b034..780f2b42c8ef 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -711,7 +711,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
711{ 711{
712 if (unlikely(!access_ok(ptr,len))) 712 if (unlikely(!access_ok(ptr,len)))
713 return 0; 713 return 0;
714 __uaccess_begin(); 714 __uaccess_begin_nospec();
715 return 1; 715 return 1;
716} 716}
717#define user_access_begin(a,b) user_access_begin(a,b) 717#define user_access_begin(a,b) user_access_begin(a,b)
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index e652a7cc6186..3f697a9e3f59 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -48,7 +48,8 @@ enum {
48 BIOS_STATUS_SUCCESS = 0, 48 BIOS_STATUS_SUCCESS = 0,
49 BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, 49 BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
50 BIOS_STATUS_EINVAL = -EINVAL, 50 BIOS_STATUS_EINVAL = -EINVAL,
51 BIOS_STATUS_UNAVAIL = -EBUSY 51 BIOS_STATUS_UNAVAIL = -EBUSY,
52 BIOS_STATUS_ABORT = -EINTR,
52}; 53};
53 54
54/* Address map parameters */ 55/* Address map parameters */
@@ -167,4 +168,9 @@ extern long system_serial_number;
167 168
168extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ 169extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
169 170
171/*
172 * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
173 */
174extern struct semaphore __efi_uv_runtime_lock;
175
170#endif /* _ASM_X86_UV_BIOS_H */ 176#endif /* _ASM_X86_UV_BIOS_H */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ac78f90aea56..cfd24f9f7614 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
39obj-$(CONFIG_X86_MCE) += mce/ 39obj-$(CONFIG_X86_MCE) += mce/
40obj-$(CONFIG_MTRR) += mtrr/ 40obj-$(CONFIG_MTRR) += mtrr/
41obj-$(CONFIG_MICROCODE) += microcode/ 41obj-$(CONFIG_MICROCODE) += microcode/
42obj-$(CONFIG_RESCTRL) += resctrl/ 42obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/
43 43
44obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 44obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
45 45
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 8654b8b0c848..01874d54f4fd 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -71,7 +71,7 @@ void __init check_bugs(void)
71 * identify_boot_cpu() initialized SMT support information, let the 71 * identify_boot_cpu() initialized SMT support information, let the
72 * core code know. 72 * core code know.
73 */ 73 */
74 cpu_smt_check_topology_early(); 74 cpu_smt_check_topology();
75 75
76 if (!IS_ENABLED(CONFIG_SMP)) { 76 if (!IS_ENABLED(CONFIG_SMP)) {
77 pr_info("CPU: "); 77 pr_info("CPU: ");
@@ -215,7 +215,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
215static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = 215static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
216 SPECTRE_V2_USER_NONE; 216 SPECTRE_V2_USER_NONE;
217 217
218#ifdef RETPOLINE 218#ifdef CONFIG_RETPOLINE
219static bool spectre_v2_bad_module; 219static bool spectre_v2_bad_module;
220 220
221bool retpoline_module_ok(bool has_retpoline) 221bool retpoline_module_ok(bool has_retpoline)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 672c7225cb1b..6ce290c506d9 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
784 quirk_no_way_out(i, m, regs); 784 quirk_no_way_out(i, m, regs);
785 785
786 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { 786 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
787 m->bank = i;
787 mce_read_aux(m, i); 788 mce_read_aux(m, i);
788 *msg = tmp; 789 *msg = tmp;
789 return 1; 790 return 1;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 51adde0a0f1a..e1f3ba19ba54 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
855 if (!p) { 855 if (!p) {
856 return ret; 856 return ret;
857 } else { 857 } else {
858 if (boot_cpu_data.microcode == p->patch_id) 858 if (boot_cpu_data.microcode >= p->patch_id)
859 return ret; 859 return ret;
860 860
861 ret = UCODE_NEW; 861 ret = UCODE_NEW;
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
index 6895049ceef7..4a06c37b9cf1 100644
--- a/arch/x86/kernel/cpu/resctrl/Makefile
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -1,4 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o 2obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o
3obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o 3obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o
4CFLAGS_pseudo_lock.o = -I$(src) 4CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c8b07d8ea5a2..17ffc869cab8 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -470,6 +470,7 @@ int crash_load_segments(struct kimage *image)
470 470
471 kbuf.memsz = kbuf.bufsz; 471 kbuf.memsz = kbuf.bufsz;
472 kbuf.buf_align = ELF_CORE_HEADER_ALIGN; 472 kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
473 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
473 ret = kexec_add_buffer(&kbuf); 474 ret = kexec_add_buffer(&kbuf);
474 if (ret) { 475 if (ret) {
475 vfree((void *)image->arch.elf_headers); 476 vfree((void *)image->arch.elf_headers);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index b0acb22e5a46..dfd3aca82c61 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -21,10 +21,6 @@
21 21
22#define HPET_MASK CLOCKSOURCE_MASK(32) 22#define HPET_MASK CLOCKSOURCE_MASK(32)
23 23
24/* FSEC = 10^-15
25 NSEC = 10^-9 */
26#define FSEC_PER_NSEC 1000000L
27
28#define HPET_DEV_USED_BIT 2 24#define HPET_DEV_USED_BIT 2
29#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT) 25#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
30#define HPET_DEV_VALID 0x8 26#define HPET_DEV_VALID 0x8
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 278cd07228dd..53917a3ebf94 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
167 struct efi_info *current_ei = &boot_params.efi_info; 167 struct efi_info *current_ei = &boot_params.efi_info;
168 struct efi_info *ei = &params->efi_info; 168 struct efi_info *ei = &params->efi_info;
169 169
170 if (!efi_enabled(EFI_RUNTIME_SERVICES))
171 return 0;
172
170 if (!current_ei->efi_memmap_size) 173 if (!current_ei->efi_memmap_size)
171 return 0; 174 return 0;
172 175
@@ -434,6 +437,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
434 kbuf.memsz = PAGE_ALIGN(header->init_size); 437 kbuf.memsz = PAGE_ALIGN(header->init_size);
435 kbuf.buf_align = header->kernel_alignment; 438 kbuf.buf_align = header->kernel_alignment;
436 kbuf.buf_min = MIN_KERNEL_LOAD_ADDR; 439 kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
440 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
437 ret = kexec_add_buffer(&kbuf); 441 ret = kexec_add_buffer(&kbuf);
438 if (ret) 442 if (ret)
439 goto out_free_params; 443 goto out_free_params;
@@ -448,6 +452,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
448 kbuf.bufsz = kbuf.memsz = initrd_len; 452 kbuf.bufsz = kbuf.memsz = initrd_len;
449 kbuf.buf_align = PAGE_SIZE; 453 kbuf.buf_align = PAGE_SIZE;
450 kbuf.buf_min = MIN_INITRD_LOAD_ADDR; 454 kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
455 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
451 ret = kexec_add_buffer(&kbuf); 456 ret = kexec_add_buffer(&kbuf);
452 if (ret) 457 if (ret)
453 goto out_free_params; 458 goto out_free_params;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index ba4bfb7f6a36..5c93a65ee1e5 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
457#else 457#else
458 u64 ipi_bitmap = 0; 458 u64 ipi_bitmap = 0;
459#endif 459#endif
460 long ret;
460 461
461 if (cpumask_empty(mask)) 462 if (cpumask_empty(mask))
462 return; 463 return;
@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
482 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { 483 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
483 max = apic_id < max ? max : apic_id; 484 max = apic_id < max ? max : apic_id;
484 } else { 485 } else {
485 kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, 486 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
486 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); 487 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
488 WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
487 min = max = apic_id; 489 min = max = apic_id;
488 ipi_bitmap = 0; 490 ipi_bitmap = 0;
489 } 491 }
@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
491 } 493 }
492 494
493 if (ipi_bitmap) { 495 if (ipi_bitmap) {
494 kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, 496 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
495 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); 497 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
498 WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
496 } 499 }
497 500
498 local_irq_restore(flags); 501 local_irq_restore(flags);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e9f777bfed40..3fae23834069 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -297,15 +297,16 @@ static int __init tsc_setup(char *str)
297 297
298__setup("tsc=", tsc_setup); 298__setup("tsc=", tsc_setup);
299 299
300#define MAX_RETRIES 5 300#define MAX_RETRIES 5
301#define SMI_TRESHOLD 50000 301#define TSC_DEFAULT_THRESHOLD 0x20000
302 302
303/* 303/*
304 * Read TSC and the reference counters. Take care of SMI disturbance 304 * Read TSC and the reference counters. Take care of any disturbances
305 */ 305 */
306static u64 tsc_read_refs(u64 *p, int hpet) 306static u64 tsc_read_refs(u64 *p, int hpet)
307{ 307{
308 u64 t1, t2; 308 u64 t1, t2;
309 u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
309 int i; 310 int i;
310 311
311 for (i = 0; i < MAX_RETRIES; i++) { 312 for (i = 0; i < MAX_RETRIES; i++) {
@@ -315,7 +316,7 @@ static u64 tsc_read_refs(u64 *p, int hpet)
315 else 316 else
316 *p = acpi_pm_read_early(); 317 *p = acpi_pm_read_early();
317 t2 = get_cycles(); 318 t2 = get_cycles();
318 if ((t2 - t1) < SMI_TRESHOLD) 319 if ((t2 - t1) < thresh)
319 return t2; 320 return t2;
320 } 321 }
321 return ULLONG_MAX; 322 return ULLONG_MAX;
@@ -703,15 +704,15 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
703 * zero. In each wait loop iteration we read the TSC and check 704 * zero. In each wait loop iteration we read the TSC and check
704 * the delta to the previous read. We keep track of the min 705 * the delta to the previous read. We keep track of the min
705 * and max values of that delta. The delta is mostly defined 706 * and max values of that delta. The delta is mostly defined
706 * by the IO time of the PIT access, so we can detect when a 707 * by the IO time of the PIT access, so we can detect when
707 * SMI/SMM disturbance happened between the two reads. If the 708 * any disturbance happened between the two reads. If the
708 * maximum time is significantly larger than the minimum time, 709 * maximum time is significantly larger than the minimum time,
709 * then we discard the result and have another try. 710 * then we discard the result and have another try.
710 * 711 *
711 * 2) Reference counter. If available we use the HPET or the 712 * 2) Reference counter. If available we use the HPET or the
712 * PMTIMER as a reference to check the sanity of that value. 713 * PMTIMER as a reference to check the sanity of that value.
713 * We use separate TSC readouts and check inside of the 714 * We use separate TSC readouts and check inside of the
714 * reference read for a SMI/SMM disturbance. We dicard 715 * reference read for any possible disturbance. We dicard
715 * disturbed values here as well. We do that around the PIT 716 * disturbed values here as well. We do that around the PIT
716 * calibration delay loop as we have to wait for a certain 717 * calibration delay loop as we have to wait for a certain
717 * amount of time anyway. 718 * amount of time anyway.
@@ -744,7 +745,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
744 if (ref1 == ref2) 745 if (ref1 == ref2)
745 continue; 746 continue;
746 747
747 /* Check, whether the sampling was disturbed by an SMI */ 748 /* Check, whether the sampling was disturbed */
748 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) 749 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
749 continue; 750 continue;
750 751
@@ -1268,7 +1269,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
1268 */ 1269 */
1269static void tsc_refine_calibration_work(struct work_struct *work) 1270static void tsc_refine_calibration_work(struct work_struct *work)
1270{ 1271{
1271 static u64 tsc_start = -1, ref_start; 1272 static u64 tsc_start = ULLONG_MAX, ref_start;
1272 static int hpet; 1273 static int hpet;
1273 u64 tsc_stop, ref_stop, delta; 1274 u64 tsc_stop, ref_stop, delta;
1274 unsigned long freq; 1275 unsigned long freq;
@@ -1283,14 +1284,15 @@ static void tsc_refine_calibration_work(struct work_struct *work)
1283 * delayed the first time we expire. So set the workqueue 1284 * delayed the first time we expire. So set the workqueue
1284 * again once we know timers are working. 1285 * again once we know timers are working.
1285 */ 1286 */
1286 if (tsc_start == -1) { 1287 if (tsc_start == ULLONG_MAX) {
1288restart:
1287 /* 1289 /*
1288 * Only set hpet once, to avoid mixing hardware 1290 * Only set hpet once, to avoid mixing hardware
1289 * if the hpet becomes enabled later. 1291 * if the hpet becomes enabled later.
1290 */ 1292 */
1291 hpet = is_hpet_enabled(); 1293 hpet = is_hpet_enabled();
1292 schedule_delayed_work(&tsc_irqwork, HZ);
1293 tsc_start = tsc_read_refs(&ref_start, hpet); 1294 tsc_start = tsc_read_refs(&ref_start, hpet);
1295 schedule_delayed_work(&tsc_irqwork, HZ);
1294 return; 1296 return;
1295 } 1297 }
1296 1298
@@ -1300,9 +1302,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
1300 if (ref_start == ref_stop) 1302 if (ref_start == ref_stop)
1301 goto out; 1303 goto out;
1302 1304
1303 /* Check, whether the sampling was disturbed by an SMI */ 1305 /* Check, whether the sampling was disturbed */
1304 if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX) 1306 if (tsc_stop == ULLONG_MAX)
1305 goto out; 1307 goto restart;
1306 1308
1307 delta = tsc_stop - tsc_start; 1309 delta = tsc_stop - tsc_start;
1308 delta *= 1000000LL; 1310 delta *= 1000000LL;
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 69b3a7c30013..31ecf7a76d5a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -2,10 +2,6 @@
2 2
3ccflags-y += -Iarch/x86/kvm 3ccflags-y += -Iarch/x86/kvm
4 4
5CFLAGS_x86.o := -I.
6CFLAGS_svm.o := -I.
7CFLAGS_vmx.o := -I.
8
9KVM := ../../../virt/kvm 5KVM := ../../../virt/kvm
10 6
11kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ 7kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index bbffa6c54697..c07958b59f50 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
335 unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; 335 unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
336 unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; 336 unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
337 unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0; 337 unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
338 unsigned f_la57 = 0;
338 339
339 /* cpuid 1.edx */ 340 /* cpuid 1.edx */
340 const u32 kvm_cpuid_1_edx_x86_features = 341 const u32 kvm_cpuid_1_edx_x86_features =
@@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
489 // TSC_ADJUST is emulated 490 // TSC_ADJUST is emulated
490 entry->ebx |= F(TSC_ADJUST); 491 entry->ebx |= F(TSC_ADJUST);
491 entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; 492 entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
493 f_la57 = entry->ecx & F(LA57);
492 cpuid_mask(&entry->ecx, CPUID_7_ECX); 494 cpuid_mask(&entry->ecx, CPUID_7_ECX);
495 /* Set LA57 based on hardware capability. */
496 entry->ecx |= f_la57;
493 entry->ecx |= f_umip; 497 entry->ecx |= f_umip;
494 /* PKU is not yet implemented for shadow paging. */ 498 /* PKU is not yet implemented for shadow paging. */
495 if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) 499 if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c90a5352d158..89d20ed1d2e8 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1636 ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); 1636 ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
1637 if (ret != HV_STATUS_INVALID_PORT_ID) 1637 if (ret != HV_STATUS_INVALID_PORT_ID)
1638 break; 1638 break;
1639 /* maybe userspace knows this conn_id: fall through */ 1639 /* fall through - maybe userspace knows this conn_id. */
1640 case HVCALL_POST_MESSAGE: 1640 case HVCALL_POST_MESSAGE:
1641 /* don't bother userspace if it has no way to handle it */ 1641 /* don't bother userspace if it has no way to handle it */
1642 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { 1642 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
@@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1832 ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE; 1832 ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
1833 ent->eax |= HV_X64_MSR_RESET_AVAILABLE; 1833 ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
1834 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; 1834 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
1835 ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
1836 ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS; 1835 ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
1837 ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT; 1836 ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
1838 1837
@@ -1848,11 +1847,11 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1848 case HYPERV_CPUID_ENLIGHTMENT_INFO: 1847 case HYPERV_CPUID_ENLIGHTMENT_INFO:
1849 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; 1848 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
1850 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; 1849 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
1851 ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
1852 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; 1850 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
1853 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; 1851 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
1854 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; 1852 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
1855 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; 1853 if (evmcs_ver)
1854 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
1856 1855
1857 /* 1856 /*
1858 * Default number of spinlock retry attempts, matches 1857 * Default number of spinlock retry attempts, matches
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9f089e2e09d0..4b6c2da7265c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1035 switch (delivery_mode) { 1035 switch (delivery_mode) {
1036 case APIC_DM_LOWEST: 1036 case APIC_DM_LOWEST:
1037 vcpu->arch.apic_arb_prio++; 1037 vcpu->arch.apic_arb_prio++;
1038 /* fall through */
1038 case APIC_DM_FIXED: 1039 case APIC_DM_FIXED:
1039 if (unlikely(trig_mode && !level)) 1040 if (unlikely(trig_mode && !level))
1040 break; 1041 break;
@@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
1874 1875
1875 case APIC_LVT0: 1876 case APIC_LVT0:
1876 apic_manage_nmi_watchdog(apic, val); 1877 apic_manage_nmi_watchdog(apic, val);
1878 /* fall through */
1877 case APIC_LVTTHMR: 1879 case APIC_LVTTHMR:
1878 case APIC_LVTPC: 1880 case APIC_LVTPC:
1879 case APIC_LVT1: 1881 case APIC_LVT1:
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ce770b446238..f2d1d230d5b8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3555 &invalid_list); 3555 &invalid_list);
3556 mmu->root_hpa = INVALID_PAGE; 3556 mmu->root_hpa = INVALID_PAGE;
3557 } 3557 }
3558 mmu->root_cr3 = 0;
3558 } 3559 }
3559 3560
3560 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 3561 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3610 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); 3611 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
3611 } else 3612 } else
3612 BUG(); 3613 BUG();
3614 vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
3613 3615
3614 return 0; 3616 return 0;
3615} 3617}
@@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3618{ 3620{
3619 struct kvm_mmu_page *sp; 3621 struct kvm_mmu_page *sp;
3620 u64 pdptr, pm_mask; 3622 u64 pdptr, pm_mask;
3621 gfn_t root_gfn; 3623 gfn_t root_gfn, root_cr3;
3622 int i; 3624 int i;
3623 3625
3624 root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; 3626 root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
3627 root_gfn = root_cr3 >> PAGE_SHIFT;
3625 3628
3626 if (mmu_check_root(vcpu, root_gfn)) 3629 if (mmu_check_root(vcpu, root_gfn))
3627 return 1; 3630 return 1;
@@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3646 ++sp->root_count; 3649 ++sp->root_count;
3647 spin_unlock(&vcpu->kvm->mmu_lock); 3650 spin_unlock(&vcpu->kvm->mmu_lock);
3648 vcpu->arch.mmu->root_hpa = root; 3651 vcpu->arch.mmu->root_hpa = root;
3649 return 0; 3652 goto set_root_cr3;
3650 } 3653 }
3651 3654
3652 /* 3655 /*
@@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3712 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); 3715 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
3713 } 3716 }
3714 3717
3718set_root_cr3:
3719 vcpu->arch.mmu->root_cr3 = root_cr3;
3720
3715 return 0; 3721 return 0;
3716} 3722}
3717 3723
@@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
4163 struct kvm_mmu_root_info root; 4169 struct kvm_mmu_root_info root;
4164 struct kvm_mmu *mmu = vcpu->arch.mmu; 4170 struct kvm_mmu *mmu = vcpu->arch.mmu;
4165 4171
4166 root.cr3 = mmu->get_cr3(vcpu); 4172 root.cr3 = mmu->root_cr3;
4167 root.hpa = mmu->root_hpa; 4173 root.hpa = mmu->root_hpa;
4168 4174
4169 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 4175 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
@@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
4176 } 4182 }
4177 4183
4178 mmu->root_hpa = root.hpa; 4184 mmu->root_hpa = root.hpa;
4185 mmu->root_cr3 = root.cr3;
4179 4186
4180 return i < KVM_MMU_NUM_PREV_ROOTS; 4187 return i < KVM_MMU_NUM_PREV_ROOTS;
4181} 4188}
@@ -4371,6 +4378,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4371 rsvd_bits(maxphyaddr, 51); 4378 rsvd_bits(maxphyaddr, 51);
4372 rsvd_check->rsvd_bits_mask[1][4] = 4379 rsvd_check->rsvd_bits_mask[1][4] =
4373 rsvd_check->rsvd_bits_mask[0][4]; 4380 rsvd_check->rsvd_bits_mask[0][4];
4381 /* fall through */
4374 case PT64_ROOT_4LEVEL: 4382 case PT64_ROOT_4LEVEL:
4375 rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | 4383 rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
4376 nonleaf_bit8_rsvd | rsvd_bits(7, 7) | 4384 nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
@@ -4769,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
4769 ext.cr4_pse = !!is_pse(vcpu); 4777 ext.cr4_pse = !!is_pse(vcpu);
4770 ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); 4778 ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
4771 ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); 4779 ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
4780 ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
4772 4781
4773 ext.valid = 1; 4782 ext.valid = 1;
4774 4783
@@ -5515,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
5515 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; 5524 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5516 5525
5517 vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; 5526 vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
5527 vcpu->arch.root_mmu.root_cr3 = 0;
5518 vcpu->arch.root_mmu.translate_gpa = translate_gpa; 5528 vcpu->arch.root_mmu.translate_gpa = translate_gpa;
5519 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) 5529 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5520 vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; 5530 vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5521 5531
5522 vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; 5532 vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
5533 vcpu->arch.guest_mmu.root_cr3 = 0;
5523 vcpu->arch.guest_mmu.translate_gpa = translate_gpa; 5534 vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
5524 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) 5535 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5525 vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; 5536 vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 307e5bddb6d9..f13a3a24d360 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
3414 kvm_mmu_reset_context(&svm->vcpu); 3414 kvm_mmu_reset_context(&svm->vcpu);
3415 kvm_mmu_load(&svm->vcpu); 3415 kvm_mmu_load(&svm->vcpu);
3416 3416
3417 /*
3418 * Drop what we picked up for L2 via svm_complete_interrupts() so it
3419 * doesn't end up in L1.
3420 */
3421 svm->vcpu.arch.nmi_injected = false;
3422 kvm_clear_exception_queue(&svm->vcpu);
3423 kvm_clear_interrupt_queue(&svm->vcpu);
3424
3417 return 0; 3425 return 0;
3418} 3426}
3419 3427
@@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4395 case MSR_IA32_APICBASE: 4403 case MSR_IA32_APICBASE:
4396 if (kvm_vcpu_apicv_active(vcpu)) 4404 if (kvm_vcpu_apicv_active(vcpu))
4397 avic_update_vapic_bar(to_svm(vcpu), data); 4405 avic_update_vapic_bar(to_svm(vcpu), data);
4398 /* Follow through */ 4406 /* Fall through */
4399 default: 4407 default:
4400 return kvm_set_msr_common(vcpu, msr); 4408 return kvm_set_msr_common(vcpu, msr);
4401 } 4409 }
@@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4504 kvm_lapic_reg_write(apic, APIC_ICR, icrl); 4512 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4505 break; 4513 break;
4506 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { 4514 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4507 int i;
4508 struct kvm_vcpu *vcpu;
4509 struct kvm *kvm = svm->vcpu.kvm;
4510 struct kvm_lapic *apic = svm->vcpu.arch.apic; 4515 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4511 4516
4512 /* 4517 /*
4513 * At this point, we expect that the AVIC HW has already 4518 * Update ICR high and low, then emulate sending IPI,
4514 * set the appropriate IRR bits on the valid target 4519 * which is handled when writing APIC_ICR.
4515 * vcpus. So, we just need to kick the appropriate vcpu.
4516 */ 4520 */
4517 kvm_for_each_vcpu(i, vcpu, kvm) { 4521 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4518 bool m = kvm_apic_match_dest(vcpu, apic, 4522 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4519 icrl & KVM_APIC_SHORT_MASK,
4520 GET_APIC_DEST_FIELD(icrh),
4521 icrl & KVM_APIC_DEST_MASK);
4522
4523 if (m && !avic_vcpu_is_running(vcpu))
4524 kvm_vcpu_wake_up(vcpu);
4525 }
4526 break; 4523 break;
4527 } 4524 }
4528 case AVIC_IPI_FAILURE_INVALID_TARGET: 4525 case AVIC_IPI_FAILURE_INVALID_TARGET:
4526 WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
4527 index, svm->vcpu.vcpu_id, icrh, icrl);
4529 break; 4528 break;
4530 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: 4529 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4531 WARN_ONCE(1, "Invalid backing page\n"); 4530 WARN_ONCE(1, "Invalid backing page\n");
@@ -6278,6 +6277,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
6278 int asid, ret; 6277 int asid, ret;
6279 6278
6280 ret = -EBUSY; 6279 ret = -EBUSY;
6280 if (unlikely(sev->active))
6281 return ret;
6282
6281 asid = sev_asid_new(); 6283 asid = sev_asid_new();
6282 if (asid < 0) 6284 if (asid < 0)
6283 return ret; 6285 return ret;
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 705f40ae2532..6432d08c7de7 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1465,7 +1465,7 @@ TRACE_EVENT(kvm_hv_send_ipi_ex,
1465#endif /* _TRACE_KVM_H */ 1465#endif /* _TRACE_KVM_H */
1466 1466
1467#undef TRACE_INCLUDE_PATH 1467#undef TRACE_INCLUDE_PATH
1468#define TRACE_INCLUDE_PATH arch/x86/kvm 1468#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
1469#undef TRACE_INCLUDE_FILE 1469#undef TRACE_INCLUDE_FILE
1470#define TRACE_INCLUDE_FILE trace 1470#define TRACE_INCLUDE_FILE trace
1471 1471
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index 95bc2247478d..5466c6d85cf3 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -332,16 +332,17 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
332 uint16_t *vmcs_version) 332 uint16_t *vmcs_version)
333{ 333{
334 struct vcpu_vmx *vmx = to_vmx(vcpu); 334 struct vcpu_vmx *vmx = to_vmx(vcpu);
335 bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
336
337 vmx->nested.enlightened_vmcs_enabled = true;
335 338
336 if (vmcs_version) 339 if (vmcs_version)
337 *vmcs_version = nested_get_evmcs_version(vcpu); 340 *vmcs_version = nested_get_evmcs_version(vcpu);
338 341
339 /* We don't support disabling the feature for simplicity. */ 342 /* We don't support disabling the feature for simplicity. */
340 if (vmx->nested.enlightened_vmcs_enabled) 343 if (evmcs_already_enabled)
341 return 0; 344 return 0;
342 345
343 vmx->nested.enlightened_vmcs_enabled = true;
344
345 vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; 346 vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
346 vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; 347 vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
347 vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; 348 vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 3170e291215d..d737a51a53ca 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -55,7 +55,7 @@ static u16 shadow_read_write_fields[] = {
55static int max_shadow_read_write_fields = 55static int max_shadow_read_write_fields =
56 ARRAY_SIZE(shadow_read_write_fields); 56 ARRAY_SIZE(shadow_read_write_fields);
57 57
58void init_vmcs_shadow_fields(void) 58static void init_vmcs_shadow_fields(void)
59{ 59{
60 int i, j; 60 int i, j;
61 61
@@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
211 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) 211 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
212 return; 212 return;
213 213
214 hrtimer_cancel(&vmx->nested.preemption_timer);
214 vmx->nested.vmxon = false; 215 vmx->nested.vmxon = false;
215 vmx->nested.smm.vmxon = false; 216 vmx->nested.smm.vmxon = false;
216 free_vpid(vmx->nested.vpid02); 217 free_vpid(vmx->nested.vpid02);
@@ -2472,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2472 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2473 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2473 return -EINVAL; 2474 return -EINVAL;
2474 2475
2476 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2477 nested_cpu_has_save_preemption_timer(vmcs12))
2478 return -EINVAL;
2479
2475 if (nested_cpu_has_ept(vmcs12) && 2480 if (nested_cpu_has_ept(vmcs12) &&
2476 !valid_ept_address(vcpu, vmcs12->ept_pointer)) 2481 !valid_ept_address(vcpu, vmcs12->ept_pointer))
2477 return -EINVAL; 2482 return -EINVAL;
@@ -4140,11 +4145,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4140 if (r < 0) 4145 if (r < 0)
4141 goto out_vmcs02; 4146 goto out_vmcs02;
4142 4147
4143 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); 4148 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
4144 if (!vmx->nested.cached_vmcs12) 4149 if (!vmx->nested.cached_vmcs12)
4145 goto out_cached_vmcs12; 4150 goto out_cached_vmcs12;
4146 4151
4147 vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); 4152 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
4148 if (!vmx->nested.cached_shadow_vmcs12) 4153 if (!vmx->nested.cached_shadow_vmcs12)
4149 goto out_cached_shadow_vmcs12; 4154 goto out_cached_shadow_vmcs12;
4150 4155
@@ -4540,9 +4545,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
4540 * given physical address won't match the required 4545 * given physical address won't match the required
4541 * VMCS12_REVISION identifier. 4546 * VMCS12_REVISION identifier.
4542 */ 4547 */
4543 nested_vmx_failValid(vcpu, 4548 return nested_vmx_failValid(vcpu,
4544 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 4549 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4545 return kvm_skip_emulated_instruction(vcpu);
4546 } 4550 }
4547 new_vmcs12 = kmap(page); 4551 new_vmcs12 = kmap(page);
4548 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || 4552 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
@@ -5264,13 +5268,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5264 copy_shadow_to_vmcs12(vmx); 5268 copy_shadow_to_vmcs12(vmx);
5265 } 5269 }
5266 5270
5267 if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) 5271 /*
5272 * Copy over the full allocated size of vmcs12 rather than just the size
5273 * of the struct.
5274 */
5275 if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
5268 return -EFAULT; 5276 return -EFAULT;
5269 5277
5270 if (nested_cpu_has_shadow_vmcs(vmcs12) && 5278 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5271 vmcs12->vmcs_link_pointer != -1ull) { 5279 vmcs12->vmcs_link_pointer != -1ull) {
5272 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, 5280 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
5273 get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) 5281 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
5274 return -EFAULT; 5282 return -EFAULT;
5275 } 5283 }
5276 5284
@@ -5553,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
5553 * secondary cpu-based controls. Do not include those that 5561 * secondary cpu-based controls. Do not include those that
5554 * depend on CPUID bits, they are added later by vmx_cpuid_update. 5562 * depend on CPUID bits, they are added later by vmx_cpuid_update.
5555 */ 5563 */
5556 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 5564 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
5557 msrs->secondary_ctls_low, 5565 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
5558 msrs->secondary_ctls_high); 5566 msrs->secondary_ctls_low,
5567 msrs->secondary_ctls_high);
5568
5559 msrs->secondary_ctls_low = 0; 5569 msrs->secondary_ctls_low = 0;
5560 msrs->secondary_ctls_high &= 5570 msrs->secondary_ctls_high &=
5561 SECONDARY_EXEC_DESC | 5571 SECONDARY_EXEC_DESC |
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4d39f731bc33..30a6bcd735ec 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -26,6 +26,7 @@
26#include <linux/mod_devicetable.h> 26#include <linux/mod_devicetable.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/sched/smt.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/tboot.h> 31#include <linux/tboot.h>
31#include <linux/trace_events.h> 32#include <linux/trace_events.h>
@@ -423,7 +424,7 @@ static void check_ept_pointer_match(struct kvm *kvm)
423 to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; 424 to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
424} 425}
425 426
426int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, 427static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
427 void *data) 428 void *data)
428{ 429{
429 struct kvm_tlb_range *range = data; 430 struct kvm_tlb_range *range = data;
@@ -453,7 +454,7 @@ static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
453 struct kvm_tlb_range *range) 454 struct kvm_tlb_range *range)
454{ 455{
455 struct kvm_vcpu *vcpu; 456 struct kvm_vcpu *vcpu;
456 int ret = -ENOTSUPP, i; 457 int ret = 0, i;
457 458
458 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); 459 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
459 460
@@ -862,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
862 if (!entry_only) 863 if (!entry_only)
863 j = find_msr(&m->host, msr); 864 j = find_msr(&m->host, msr);
864 865
865 if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { 866 if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
867 (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
866 printk_once(KERN_WARNING "Not enough msr switch entries. " 868 printk_once(KERN_WARNING "Not enough msr switch entries. "
867 "Can't add msr %x\n", msr); 869 "Can't add msr %x\n", msr);
868 return; 870 return;
@@ -1192,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
1192 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) 1194 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
1193 return; 1195 return;
1194 1196
1195 /*
1196 * First handle the simple case where no cmpxchg is necessary; just
1197 * allow posting non-urgent interrupts.
1198 *
1199 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
1200 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
1201 * expects the VCPU to be on the blocked_vcpu_list that matches
1202 * PI.NDST.
1203 */
1204 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
1205 vcpu->cpu == cpu) {
1206 pi_clear_sn(pi_desc);
1207 return;
1208 }
1209
1210 /* The full case. */ 1197 /* The full case. */
1211 do { 1198 do {
1212 old.control = new.control = pi_desc->control; 1199 old.control = new.control = pi_desc->control;
@@ -1221,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
1221 new.sn = 0; 1208 new.sn = 0;
1222 } while (cmpxchg64(&pi_desc->control, old.control, 1209 } while (cmpxchg64(&pi_desc->control, old.control,
1223 new.control) != old.control); 1210 new.control) != old.control);
1211
1212 /*
1213 * Clear SN before reading the bitmap. The VT-d firmware
1214 * writes the bitmap and reads SN atomically (5.2.3 in the
1215 * spec), so it doesn't really have a memory barrier that
1216 * pairs with this, but we cannot do that and we need one.
1217 */
1218 smp_mb__after_atomic();
1219
1220 if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
1221 pi_set_on(pi_desc);
1224} 1222}
1225 1223
1226/* 1224/*
@@ -1773,7 +1771,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1773 if (!msr_info->host_initiated && 1771 if (!msr_info->host_initiated &&
1774 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) 1772 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
1775 return 1; 1773 return 1;
1776 /* Otherwise falls through */ 1774 /* Else, falls through */
1777 default: 1775 default:
1778 msr = find_msr_entry(vmx, msr_info->index); 1776 msr = find_msr_entry(vmx, msr_info->index);
1779 if (msr) { 1777 if (msr) {
@@ -2014,7 +2012,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2014 /* Check reserved bit, higher 32 bits should be zero */ 2012 /* Check reserved bit, higher 32 bits should be zero */
2015 if ((data >> 32) != 0) 2013 if ((data >> 32) != 0)
2016 return 1; 2014 return 1;
2017 /* Otherwise falls through */ 2015 /* Else, falls through */
2018 default: 2016 default:
2019 msr = find_msr_entry(vmx, msr_index); 2017 msr = find_msr_entry(vmx, msr_index);
2020 if (msr) { 2018 if (msr) {
@@ -2344,7 +2342,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2344 case 37: /* AAT100 */ 2342 case 37: /* AAT100 */
2345 case 44: /* BC86,AAY89,BD102 */ 2343 case 44: /* BC86,AAY89,BD102 */
2346 case 46: /* BA97 */ 2344 case 46: /* BA97 */
2347 _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 2345 _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
2348 _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 2346 _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
2349 pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " 2347 pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2350 "does not work properly. Using workaround\n"); 2348 "does not work properly. Using workaround\n");
@@ -6362,72 +6360,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
6362 vmx->loaded_vmcs->hv_timer_armed = false; 6360 vmx->loaded_vmcs->hv_timer_armed = false;
6363} 6361}
6364 6362
6365static void vmx_vcpu_run(struct kvm_vcpu *vcpu) 6363static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
6366{ 6364{
6367 struct vcpu_vmx *vmx = to_vmx(vcpu); 6365 unsigned long evmcs_rsp;
6368 unsigned long cr3, cr4, evmcs_rsp;
6369
6370 /* Record the guest's net vcpu time for enforced NMI injections. */
6371 if (unlikely(!enable_vnmi &&
6372 vmx->loaded_vmcs->soft_vnmi_blocked))
6373 vmx->loaded_vmcs->entry_time = ktime_get();
6374
6375 /* Don't enter VMX if guest state is invalid, let the exit handler
6376 start emulation until we arrive back to a valid state */
6377 if (vmx->emulation_required)
6378 return;
6379
6380 if (vmx->ple_window_dirty) {
6381 vmx->ple_window_dirty = false;
6382 vmcs_write32(PLE_WINDOW, vmx->ple_window);
6383 }
6384
6385 if (vmx->nested.need_vmcs12_sync)
6386 nested_sync_from_vmcs12(vcpu);
6387
6388 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
6389 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
6390 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
6391 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
6392
6393 cr3 = __get_current_cr3_fast();
6394 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
6395 vmcs_writel(HOST_CR3, cr3);
6396 vmx->loaded_vmcs->host_state.cr3 = cr3;
6397 }
6398
6399 cr4 = cr4_read_shadow();
6400 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
6401 vmcs_writel(HOST_CR4, cr4);
6402 vmx->loaded_vmcs->host_state.cr4 = cr4;
6403 }
6404
6405 /* When single-stepping over STI and MOV SS, we must clear the
6406 * corresponding interruptibility bits in the guest state. Otherwise
6407 * vmentry fails as it then expects bit 14 (BS) in pending debug
6408 * exceptions being set, but that's not correct for the guest debugging
6409 * case. */
6410 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6411 vmx_set_interrupt_shadow(vcpu, 0);
6412
6413 if (static_cpu_has(X86_FEATURE_PKU) &&
6414 kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
6415 vcpu->arch.pkru != vmx->host_pkru)
6416 __write_pkru(vcpu->arch.pkru);
6417
6418 pt_guest_enter(vmx);
6419
6420 atomic_switch_perf_msrs(vmx);
6421
6422 vmx_update_hv_timer(vcpu);
6423
6424 /*
6425 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
6426 * it's non-zero. Since vmentry is serialising on affected CPUs, there
6427 * is no need to worry about the conditional branch over the wrmsr
6428 * being speculatively taken.
6429 */
6430 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
6431 6366
6432 vmx->__launched = vmx->loaded_vmcs->launched; 6367 vmx->__launched = vmx->loaded_vmcs->launched;
6433 6368
@@ -6567,6 +6502,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
6567 , "eax", "ebx", "edi" 6502 , "eax", "ebx", "edi"
6568#endif 6503#endif
6569 ); 6504 );
6505}
6506STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
6507
6508static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
6509{
6510 struct vcpu_vmx *vmx = to_vmx(vcpu);
6511 unsigned long cr3, cr4;
6512
6513 /* Record the guest's net vcpu time for enforced NMI injections. */
6514 if (unlikely(!enable_vnmi &&
6515 vmx->loaded_vmcs->soft_vnmi_blocked))
6516 vmx->loaded_vmcs->entry_time = ktime_get();
6517
6518 /* Don't enter VMX if guest state is invalid, let the exit handler
6519 start emulation until we arrive back to a valid state */
6520 if (vmx->emulation_required)
6521 return;
6522
6523 if (vmx->ple_window_dirty) {
6524 vmx->ple_window_dirty = false;
6525 vmcs_write32(PLE_WINDOW, vmx->ple_window);
6526 }
6527
6528 if (vmx->nested.need_vmcs12_sync)
6529 nested_sync_from_vmcs12(vcpu);
6530
6531 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
6532 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
6533 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
6534 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
6535
6536 cr3 = __get_current_cr3_fast();
6537 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
6538 vmcs_writel(HOST_CR3, cr3);
6539 vmx->loaded_vmcs->host_state.cr3 = cr3;
6540 }
6541
6542 cr4 = cr4_read_shadow();
6543 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
6544 vmcs_writel(HOST_CR4, cr4);
6545 vmx->loaded_vmcs->host_state.cr4 = cr4;
6546 }
6547
6548 /* When single-stepping over STI and MOV SS, we must clear the
6549 * corresponding interruptibility bits in the guest state. Otherwise
6550 * vmentry fails as it then expects bit 14 (BS) in pending debug
6551 * exceptions being set, but that's not correct for the guest debugging
6552 * case. */
6553 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6554 vmx_set_interrupt_shadow(vcpu, 0);
6555
6556 if (static_cpu_has(X86_FEATURE_PKU) &&
6557 kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
6558 vcpu->arch.pkru != vmx->host_pkru)
6559 __write_pkru(vcpu->arch.pkru);
6560
6561 pt_guest_enter(vmx);
6562
6563 atomic_switch_perf_msrs(vmx);
6564
6565 vmx_update_hv_timer(vcpu);
6566
6567 /*
6568 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
6569 * it's non-zero. Since vmentry is serialising on affected CPUs, there
6570 * is no need to worry about the conditional branch over the wrmsr
6571 * being speculatively taken.
6572 */
6573 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
6574
6575 __vmx_vcpu_run(vcpu, vmx);
6570 6576
6571 /* 6577 /*
6572 * We do not use IBRS in the kernel. If this vCPU has used the 6578 * We do not use IBRS in the kernel. If this vCPU has used the
@@ -6648,7 +6654,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
6648 vmx_recover_nmi_blocking(vmx); 6654 vmx_recover_nmi_blocking(vmx);
6649 vmx_complete_interrupts(vmx); 6655 vmx_complete_interrupts(vmx);
6650} 6656}
6651STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
6652 6657
6653static struct kvm *vmx_vm_alloc(void) 6658static struct kvm *vmx_vm_alloc(void)
6654{ 6659{
@@ -6816,7 +6821,7 @@ static int vmx_vm_init(struct kvm *kvm)
6816 * Warn upon starting the first VM in a potentially 6821 * Warn upon starting the first VM in a potentially
6817 * insecure environment. 6822 * insecure environment.
6818 */ 6823 */
6819 if (cpu_smt_control == CPU_SMT_ENABLED) 6824 if (sched_smt_active())
6820 pr_warn_once(L1TF_MSG_SMT); 6825 pr_warn_once(L1TF_MSG_SMT);
6821 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) 6826 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
6822 pr_warn_once(L1TF_MSG_L1D); 6827 pr_warn_once(L1TF_MSG_L1D);
@@ -7044,7 +7049,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
7044 7049
7045 /* unmask address range configure area */ 7050 /* unmask address range configure area */
7046 for (i = 0; i < vmx->pt_desc.addr_range; i++) 7051 for (i = 0; i < vmx->pt_desc.addr_range; i++)
7047 vmx->pt_desc.ctl_bitmask &= ~(0xf << (32 + i * 4)); 7052 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7048} 7053}
7049 7054
7050static void vmx_cpuid_update(struct kvm_vcpu *vcpu) 7055static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 99328954c2fc..0ac0a64c7790 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
337 return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); 337 return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
338} 338}
339 339
340static inline void pi_clear_sn(struct pi_desc *pi_desc) 340static inline void pi_set_sn(struct pi_desc *pi_desc)
341{ 341{
342 return clear_bit(POSTED_INTR_SN, 342 return set_bit(POSTED_INTR_SN,
343 (unsigned long *)&pi_desc->control); 343 (unsigned long *)&pi_desc->control);
344} 344}
345 345
346static inline void pi_set_sn(struct pi_desc *pi_desc) 346static inline void pi_set_on(struct pi_desc *pi_desc)
347{ 347{
348 return set_bit(POSTED_INTR_SN, 348 set_bit(POSTED_INTR_ON,
349 (unsigned long *)&pi_desc->control); 349 (unsigned long *)&pi_desc->control);
350} 350}
351 351
352static inline void pi_clear_on(struct pi_desc *pi_desc) 352static inline void pi_clear_on(struct pi_desc *pi_desc)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02c8e095a239..941f932373d0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3834 case KVM_CAP_HYPERV_SYNIC2: 3834 case KVM_CAP_HYPERV_SYNIC2:
3835 if (cap->args[0]) 3835 if (cap->args[0])
3836 return -EINVAL; 3836 return -EINVAL;
3837 /* fall through */
3838
3837 case KVM_CAP_HYPERV_SYNIC: 3839 case KVM_CAP_HYPERV_SYNIC:
3838 if (!irqchip_in_kernel(vcpu->kvm)) 3840 if (!irqchip_in_kernel(vcpu->kvm))
3839 return -EINVAL; 3841 return -EINVAL;
@@ -5114,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
5114{ 5116{
5115 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 5117 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
5116 5118
5119 /*
5120 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
5121 * is returned, but our callers are not ready for that and they blindly
5122 * call kvm_inject_page_fault. Ensure that they at least do not leak
5123 * uninitialized kernel stack memory into cr2 and error code.
5124 */
5125 memset(exception, 0, sizeof(*exception));
5117 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 5126 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
5118 exception); 5127 exception);
5119} 5128}
@@ -6480,8 +6489,7 @@ restart:
6480 toggle_interruptibility(vcpu, ctxt->interruptibility); 6489 toggle_interruptibility(vcpu, ctxt->interruptibility);
6481 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6490 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6482 kvm_rip_write(vcpu, ctxt->eip); 6491 kvm_rip_write(vcpu, ctxt->eip);
6483 if (r == EMULATE_DONE && 6492 if (r == EMULATE_DONE && ctxt->tf)
6484 (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
6485 kvm_vcpu_do_singlestep(vcpu, &r); 6493 kvm_vcpu_do_singlestep(vcpu, &r);
6486 if (!ctxt->have_exception || 6494 if (!ctxt->have_exception ||
6487 exception_type(ctxt->exception.vector) == EXCPT_TRAP) 6495 exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -7093,10 +7101,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
7093 case KVM_HC_CLOCK_PAIRING: 7101 case KVM_HC_CLOCK_PAIRING:
7094 ret = kvm_pv_clock_pairing(vcpu, a0, a1); 7102 ret = kvm_pv_clock_pairing(vcpu, a0, a1);
7095 break; 7103 break;
7104#endif
7096 case KVM_HC_SEND_IPI: 7105 case KVM_HC_SEND_IPI:
7097 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); 7106 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
7098 break; 7107 break;
7099#endif
7100 default: 7108 default:
7101 ret = -KVM_ENOSYS; 7109 ret = -KVM_ENOSYS;
7102 break; 7110 break;
@@ -7793,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
7793 * 1) We should set ->mode before checking ->requests. Please see 7801 * 1) We should set ->mode before checking ->requests. Please see
7794 * the comment in kvm_vcpu_exiting_guest_mode(). 7802 * the comment in kvm_vcpu_exiting_guest_mode().
7795 * 7803 *
7796 * 2) For APICv, we should set ->mode before checking PIR.ON. This 7804 * 2) For APICv, we should set ->mode before checking PID.ON. This
7797 * pairs with the memory barrier implicit in pi_test_and_set_on 7805 * pairs with the memory barrier implicit in pi_test_and_set_on
7798 * (see vmx_deliver_posted_interrupt). 7806 * (see vmx_deliver_posted_interrupt).
7799 * 7807 *
@@ -7937,6 +7945,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
7937 vcpu->arch.pv.pv_unhalted = false; 7945 vcpu->arch.pv.pv_unhalted = false;
7938 vcpu->arch.mp_state = 7946 vcpu->arch.mp_state =
7939 KVM_MP_STATE_RUNNABLE; 7947 KVM_MP_STATE_RUNNABLE;
7948 /* fall through */
7940 case KVM_MP_STATE_RUNNABLE: 7949 case KVM_MP_STATE_RUNNABLE:
7941 vcpu->arch.apf.halted = false; 7950 vcpu->arch.apf.halted = false;
7942 break; 7951 break;
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 66894675f3c8..df50451d94ef 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -2,8 +2,11 @@
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/io.h> 3#include <linux/io.h>
4 4
5#define movs(type,to,from) \
6 asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
7
5/* Originally from i386/string.h */ 8/* Originally from i386/string.h */
6static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) 9static __always_inline void rep_movs(void *to, const void *from, size_t n)
7{ 10{
8 unsigned long d0, d1, d2; 11 unsigned long d0, d1, d2;
9 asm volatile("rep ; movsl\n\t" 12 asm volatile("rep ; movsl\n\t"
@@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
21 24
22void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) 25void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
23{ 26{
24 __iomem_memcpy(to, (const void *)from, n); 27 if (unlikely(!n))
28 return;
29
30 /* Align any unaligned source IO */
31 if (unlikely(1 & (unsigned long)from)) {
32 movs("b", to, from);
33 n--;
34 }
35 if (n > 1 && unlikely(2 & (unsigned long)from)) {
36 movs("w", to, from);
37 n-=2;
38 }
39 rep_movs(to, (const void *)from, n);
25} 40}
26EXPORT_SYMBOL(memcpy_fromio); 41EXPORT_SYMBOL(memcpy_fromio);
27 42
28void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) 43void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
29{ 44{
30 __iomem_memcpy((void *)to, (const void *) from, n); 45 if (unlikely(!n))
46 return;
47
48 /* Align any unaligned destination IO */
49 if (unlikely(1 & (unsigned long)to)) {
50 movs("b", to, from);
51 n--;
52 }
53 if (n > 1 && unlikely(2 & (unsigned long)to)) {
54 movs("w", to, from);
55 n-=2;
56 }
57 rep_movs((void *)to, (const void *) from, n);
31} 58}
32EXPORT_SYMBOL(memcpy_toio); 59EXPORT_SYMBOL(memcpy_toio);
33 60
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 79778ab200e4..a53665116458 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
36 u16 status, timer; 36 u16 status, timer;
37 37
38 do { 38 do {
39 outb(I8254_PORT_CONTROL, 39 outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
40 I8254_CMD_READBACK | I8254_SELECT_COUNTER0); 40 I8254_PORT_CONTROL);
41 status = inb(I8254_PORT_COUNTER0); 41 status = inb(I8254_PORT_COUNTER0);
42 timer = inb(I8254_PORT_COUNTER0); 42 timer = inb(I8254_PORT_COUNTER0);
43 timer |= inb(I8254_PORT_COUNTER0) << 8; 43 timer |= inb(I8254_PORT_COUNTER0) << 8;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2ff25ad33233..9d5c75f02295 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
595 return; 595 return;
596 } 596 }
597 597
598 addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24); 598 addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
599#ifdef CONFIG_X86_64 599#ifdef CONFIG_X86_64
600 addr |= ((u64)desc.base3 << 32); 600 addr |= ((u64)desc.base3 << 32);
601#endif 601#endif
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index a19ef1a416ff..4aa9b1480866 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -158,8 +158,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
158 pmd = pmd_offset(pud, ppd->vaddr); 158 pmd = pmd_offset(pud, ppd->vaddr);
159 if (pmd_none(*pmd)) { 159 if (pmd_none(*pmd)) {
160 pte = ppd->pgtable_area; 160 pte = ppd->pgtable_area;
161 memset(pte, 0, sizeof(pte) * PTRS_PER_PTE); 161 memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
162 ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE; 162 ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
163 set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte))); 163 set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
164 } 164 }
165 165
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4f8972311a77..14e6119838a6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
230 230
231#endif 231#endif
232 232
233/*
234 * See set_mce_nospec().
235 *
236 * Machine check recovery code needs to change cache mode of poisoned pages to
237 * UC to avoid speculative access logging another error. But passing the
238 * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
239 * speculative access. So we cheat and flip the top bit of the address. This
240 * works fine for the code that updates the page tables. But at the end of the
241 * process we need to flush the TLB and cache and the non-canonical address
242 * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
243 *
244 * But in the common case we already have a canonical address. This code
245 * will fix the top bit if needed and is a no-op otherwise.
246 */
247static inline unsigned long fix_addr(unsigned long addr)
248{
249#ifdef CONFIG_X86_64
250 return (long)(addr << 1) >> 1;
251#else
252 return addr;
253#endif
254}
255
233static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) 256static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
234{ 257{
235 if (cpa->flags & CPA_PAGES_ARRAY) { 258 if (cpa->flags & CPA_PAGES_ARRAY) {
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
313 unsigned int i; 336 unsigned int i;
314 337
315 for (i = 0; i < cpa->numpages; i++) 338 for (i = 0; i < cpa->numpages; i++)
316 __flush_tlb_one_kernel(__cpa_addr(cpa, i)); 339 __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
317} 340}
318 341
319static void cpa_flush(struct cpa_data *data, int cache) 342static void cpa_flush(struct cpa_data *data, int cache)
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
347 * Only flush present addresses: 370 * Only flush present addresses:
348 */ 371 */
349 if (pte && (pte_val(*pte) & _PAGE_PRESENT)) 372 if (pte && (pte_val(*pte) & _PAGE_PRESENT))
350 clflush_cache_range_opt((void *)addr, PAGE_SIZE); 373 clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
351 } 374 }
352 mb(); 375 mb();
353} 376}
@@ -1627,29 +1650,6 @@ out:
1627 return ret; 1650 return ret;
1628} 1651}
1629 1652
1630/*
1631 * Machine check recovery code needs to change cache mode of poisoned
1632 * pages to UC to avoid speculative access logging another error. But
1633 * passing the address of the 1:1 mapping to set_memory_uc() is a fine
1634 * way to encourage a speculative access. So we cheat and flip the top
1635 * bit of the address. This works fine for the code that updates the
1636 * page tables. But at the end of the process we need to flush the cache
1637 * and the non-canonical address causes a #GP fault when used by the
1638 * CLFLUSH instruction.
1639 *
1640 * But in the common case we already have a canonical address. This code
1641 * will fix the top bit if needed and is a no-op otherwise.
1642 */
1643static inline unsigned long make_addr_canonical_again(unsigned long addr)
1644{
1645#ifdef CONFIG_X86_64
1646 return (long)(addr << 1) >> 1;
1647#else
1648 return addr;
1649#endif
1650}
1651
1652
1653static int change_page_attr_set_clr(unsigned long *addr, int numpages, 1653static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1654 pgprot_t mask_set, pgprot_t mask_clr, 1654 pgprot_t mask_set, pgprot_t mask_clr,
1655 int force_split, int in_flag, 1655 int force_split, int in_flag,
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 4a6a5a26c582..eb33432f2f24 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -29,7 +29,8 @@
29 29
30struct uv_systab *uv_systab; 30struct uv_systab *uv_systab;
31 31
32s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) 32static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
33 u64 a4, u64 a5)
33{ 34{
34 struct uv_systab *tab = uv_systab; 35 struct uv_systab *tab = uv_systab;
35 s64 ret; 36 s64 ret;
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
51 52
52 return ret; 53 return ret;
53} 54}
55
56s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
57{
58 s64 ret;
59
60 if (down_interruptible(&__efi_uv_runtime_lock))
61 return BIOS_STATUS_ABORT;
62
63 ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
64 up(&__efi_uv_runtime_lock);
65
66 return ret;
67}
54EXPORT_SYMBOL_GPL(uv_bios_call); 68EXPORT_SYMBOL_GPL(uv_bios_call);
55 69
56s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, 70s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
59 unsigned long bios_flags; 73 unsigned long bios_flags;
60 s64 ret; 74 s64 ret;
61 75
76 if (down_interruptible(&__efi_uv_runtime_lock))
77 return BIOS_STATUS_ABORT;
78
62 local_irq_save(bios_flags); 79 local_irq_save(bios_flags);
63 ret = uv_bios_call(which, a1, a2, a3, a4, a5); 80 ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
64 local_irq_restore(bios_flags); 81 local_irq_restore(bios_flags);
65 82
83 up(&__efi_uv_runtime_lock);
84
66 return ret; 85 return ret;
67} 86}
68 87
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 2f6787fc7106..c54a493e139a 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
898 val = native_read_msr_safe(msr, err); 898 val = native_read_msr_safe(msr, err);
899 switch (msr) { 899 switch (msr) {
900 case MSR_IA32_APICBASE: 900 case MSR_IA32_APICBASE:
901#ifdef CONFIG_X86_X2APIC 901 val &= ~X2APIC_ENABLE;
902 if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
903#endif
904 val &= ~X2APIC_ENABLE;
905 break; 902 break;
906 } 903 }
907 return val; 904 return val;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 72bf446c3fee..6e29794573b7 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -361,8 +361,6 @@ void xen_timer_resume(void)
361{ 361{
362 int cpu; 362 int cpu;
363 363
364 pvclock_resume();
365
366 if (xen_clockevent != &xen_vcpuop_clockevent) 364 if (xen_clockevent != &xen_vcpuop_clockevent)
367 return; 365 return;
368 366
@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
379}; 377};
380 378
381static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; 379static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
380static u64 xen_clock_value_saved;
382 381
383void xen_save_time_memory_area(void) 382void xen_save_time_memory_area(void)
384{ 383{
385 struct vcpu_register_time_memory_area t; 384 struct vcpu_register_time_memory_area t;
386 int ret; 385 int ret;
387 386
387 xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
388
388 if (!xen_clock) 389 if (!xen_clock)
389 return; 390 return;
390 391
@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
404 int ret; 405 int ret;
405 406
406 if (!xen_clock) 407 if (!xen_clock)
407 return; 408 goto out;
408 409
409 t.addr.v = &xen_clock->pvti; 410 t.addr.v = &xen_clock->pvti;
410 411
@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
421 if (ret != 0) 422 if (ret != 0)
422 pr_notice("Cannot restore secondary vcpu_time_info (err %d)", 423 pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
423 ret); 424 ret);
425
426out:
427 /* Need pvclock_resume() before using xen_clocksource_read(). */
428 pvclock_resume();
429 xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
424} 430}
425 431
426static void xen_setup_vsyscall_time_info(void) 432static void xen_setup_vsyscall_time_info(void)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 20a0756f27ef..ce91682770cb 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -164,7 +164,7 @@ config XTENSA_FAKE_NMI
164 If unsure, say N. 164 If unsure, say N.
165 165
166config XTENSA_UNALIGNED_USER 166config XTENSA_UNALIGNED_USER
167 bool "Unaligned memory access in use space" 167 bool "Unaligned memory access in user space"
168 help 168 help
169 The Xtensa architecture currently does not handle unaligned 169 The Xtensa architecture currently does not handle unaligned
170 memory accesses in hardware but through an exception handler. 170 memory accesses in hardware but through an exception handler.
@@ -451,7 +451,7 @@ config USE_OF
451 help 451 help
452 Include support for flattened device tree machine descriptions. 452 Include support for flattened device tree machine descriptions.
453 453
454config BUILTIN_DTB 454config BUILTIN_DTB_SOURCE
455 string "DTB to build into the kernel image" 455 string "DTB to build into the kernel image"
456 depends on OF 456 depends on OF
457 457
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
index f8052ba5aea8..0b8d00cdae7c 100644
--- a/arch/xtensa/boot/dts/Makefile
+++ b/arch/xtensa/boot/dts/Makefile
@@ -7,9 +7,9 @@
7# 7#
8# 8#
9 9
10BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o 10BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
11ifneq ($(CONFIG_BUILTIN_DTB),"") 11ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
12obj-$(CONFIG_OF) += $(BUILTIN_DTB) 12obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE)
13endif 13endif
14 14
15# for CONFIG_OF_ALL_DTBS test 15# for CONFIG_OF_ALL_DTBS test
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index 2bf964df37ba..f378e56f9ce6 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -34,7 +34,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
34CONFIG_CMDLINE_BOOL=y 34CONFIG_CMDLINE_BOOL=y
35CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" 35CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
36CONFIG_USE_OF=y 36CONFIG_USE_OF=y
37CONFIG_BUILTIN_DTB="kc705" 37CONFIG_BUILTIN_DTB_SOURCE="kc705"
38# CONFIG_COMPACTION is not set 38# CONFIG_COMPACTION is not set
39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
40CONFIG_PM=y 40CONFIG_PM=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index 3221b7053fa3..62f32a902568 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -38,7 +38,7 @@ CONFIG_HIGHMEM=y
38# CONFIG_PCI is not set 38# CONFIG_PCI is not set
39CONFIG_XTENSA_PLATFORM_XTFPGA=y 39CONFIG_XTENSA_PLATFORM_XTFPGA=y
40CONFIG_USE_OF=y 40CONFIG_USE_OF=y
41CONFIG_BUILTIN_DTB="csp" 41CONFIG_BUILTIN_DTB_SOURCE="csp"
42# CONFIG_COMPACTION is not set 42# CONFIG_COMPACTION is not set
43CONFIG_XTFPGA_LCD=y 43CONFIG_XTFPGA_LCD=y
44# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 44# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 985fa8546e4e..8bebe07f1060 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
33CONFIG_CMDLINE_BOOL=y 33CONFIG_CMDLINE_BOOL=y
34CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" 34CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
35CONFIG_USE_OF=y 35CONFIG_USE_OF=y
36CONFIG_BUILTIN_DTB="kc705" 36CONFIG_BUILTIN_DTB_SOURCE="kc705"
37# CONFIG_COMPACTION is not set 37# CONFIG_COMPACTION is not set
38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
39CONFIG_NET=y 39CONFIG_NET=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index f3fc4f970ca8..933ab2adf434 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -39,7 +39,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
39CONFIG_CMDLINE_BOOL=y 39CONFIG_CMDLINE_BOOL=y
40CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000" 40CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
41CONFIG_USE_OF=y 41CONFIG_USE_OF=y
42CONFIG_BUILTIN_DTB="kc705_nommu" 42CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu"
43CONFIG_BINFMT_FLAT=y 43CONFIG_BINFMT_FLAT=y
44CONFIG_NET=y 44CONFIG_NET=y
45CONFIG_PACKET=y 45CONFIG_PACKET=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 11fed6c06a7c..e29c5b179a5b 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -33,11 +33,12 @@ CONFIG_SMP=y
33CONFIG_HOTPLUG_CPU=y 33CONFIG_HOTPLUG_CPU=y
34# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set 34# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
35# CONFIG_PCI is not set 35# CONFIG_PCI is not set
36CONFIG_VECTORS_OFFSET=0x00002000
36CONFIG_XTENSA_PLATFORM_XTFPGA=y 37CONFIG_XTENSA_PLATFORM_XTFPGA=y
37CONFIG_CMDLINE_BOOL=y 38CONFIG_CMDLINE_BOOL=y
38CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0" 39CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
39CONFIG_USE_OF=y 40CONFIG_USE_OF=y
40CONFIG_BUILTIN_DTB="lx200mx" 41CONFIG_BUILTIN_DTB_SOURCE="lx200mx"
41# CONFIG_COMPACTION is not set 42# CONFIG_COMPACTION is not set
42# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 43# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
43CONFIG_NET=y 44CONFIG_NET=y
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index da08e75100ab..7f009719304e 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -276,12 +276,13 @@ should_never_return:
276 276
277 movi a2, cpu_start_ccount 277 movi a2, cpu_start_ccount
2781: 2781:
279 memw
279 l32i a3, a2, 0 280 l32i a3, a2, 0
280 beqi a3, 0, 1b 281 beqi a3, 0, 1b
281 movi a3, 0 282 movi a3, 0
282 s32i a3, a2, 0 283 s32i a3, a2, 0
283 memw
2841: 2841:
285 memw
285 l32i a3, a2, 0 286 l32i a3, a2, 0
286 beqi a3, 0, 1b 287 beqi a3, 0, 1b
287 wsr a3, ccount 288 wsr a3, ccount
@@ -317,11 +318,13 @@ ENTRY(cpu_restart)
317 rsr a0, prid 318 rsr a0, prid
318 neg a2, a0 319 neg a2, a0
319 movi a3, cpu_start_id 320 movi a3, cpu_start_id
321 memw
320 s32i a2, a3, 0 322 s32i a2, a3, 0
321#if XCHAL_DCACHE_IS_WRITEBACK 323#if XCHAL_DCACHE_IS_WRITEBACK
322 dhwbi a3, 0 324 dhwbi a3, 0
323#endif 325#endif
3241: 3261:
327 memw
325 l32i a2, a3, 0 328 l32i a2, a3, 0
326 dhi a3, 0 329 dhi a3, 0
327 bne a2, a0, 1b 330 bne a2, a0, 1b
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 932d64689bac..be1f280c322c 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
83{ 83{
84 unsigned i; 84 unsigned i;
85 85
86 for (i = 0; i < max_cpus; ++i) 86 for_each_possible_cpu(i)
87 set_cpu_present(i, true); 87 set_cpu_present(i, true);
88} 88}
89 89
@@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
96 pr_info("%s: Core Count = %d\n", __func__, ncpus); 96 pr_info("%s: Core Count = %d\n", __func__, ncpus);
97 pr_info("%s: Core Id = %d\n", __func__, core_id); 97 pr_info("%s: Core Id = %d\n", __func__, core_id);
98 98
99 if (ncpus > NR_CPUS) {
100 ncpus = NR_CPUS;
101 pr_info("%s: limiting core count by %d\n", __func__, ncpus);
102 }
103
99 for (i = 0; i < ncpus; ++i) 104 for (i = 0; i < ncpus; ++i)
100 set_cpu_possible(i, true); 105 set_cpu_possible(i, true);
101} 106}
@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
195 int i; 200 int i;
196 201
197#ifdef CONFIG_HOTPLUG_CPU 202#ifdef CONFIG_HOTPLUG_CPU
198 cpu_start_id = cpu; 203 WRITE_ONCE(cpu_start_id, cpu);
199 system_flush_invalidate_dcache_range( 204 /* Pairs with the third memw in the cpu_restart */
200 (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); 205 mb();
206 system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
207 sizeof(cpu_start_id));
201#endif 208#endif
202 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); 209 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
203 210
@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
206 ccount = get_ccount(); 213 ccount = get_ccount();
207 while (!ccount); 214 while (!ccount);
208 215
209 cpu_start_ccount = ccount; 216 WRITE_ONCE(cpu_start_ccount, ccount);
210 217
211 while (time_before(jiffies, timeout)) { 218 do {
219 /*
220 * Pairs with the first two memws in the
221 * .Lboot_secondary.
222 */
212 mb(); 223 mb();
213 if (!cpu_start_ccount) 224 ccount = READ_ONCE(cpu_start_ccount);
214 break; 225 } while (ccount && time_before(jiffies, timeout));
215 }
216 226
217 if (cpu_start_ccount) { 227 if (ccount) {
218 smp_call_function_single(0, mx_cpu_stop, 228 smp_call_function_single(0, mx_cpu_stop,
219 (void *)cpu, 1); 229 (void *)cpu, 1);
220 cpu_start_ccount = 0; 230 WRITE_ONCE(cpu_start_ccount, 0);
221 return -EIO; 231 return -EIO;
222 } 232 }
223 } 233 }
@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
237 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", 247 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
238 __func__, cpu, idle, start_info.stack); 248 __func__, cpu, idle, start_info.stack);
239 249
250 init_completion(&cpu_running);
240 ret = boot_secondary(cpu, idle); 251 ret = boot_secondary(cpu, idle);
241 if (ret == 0) { 252 if (ret == 0) {
242 wait_for_completion_timeout(&cpu_running, 253 wait_for_completion_timeout(&cpu_running,
@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
298 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 309 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
299 while (time_before(jiffies, timeout)) { 310 while (time_before(jiffies, timeout)) {
300 system_invalidate_dcache_range((unsigned long)&cpu_start_id, 311 system_invalidate_dcache_range((unsigned long)&cpu_start_id,
301 sizeof(cpu_start_id)); 312 sizeof(cpu_start_id));
302 if (cpu_start_id == -cpu) { 313 /* Pairs with the second memw in the cpu_restart */
314 mb();
315 if (READ_ONCE(cpu_start_id) == -cpu) {
303 platform_cpu_kill(cpu); 316 platform_cpu_kill(cpu);
304 return; 317 return;
305 } 318 }
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index fd524a54d2ab..378186b5eb40 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
89 container_of(evt, struct ccount_timer, evt); 89 container_of(evt, struct ccount_timer, evt);
90 90
91 if (timer->irq_enabled) { 91 if (timer->irq_enabled) {
92 disable_irq(evt->irq); 92 disable_irq_nosync(evt->irq);
93 timer->irq_enabled = 0; 93 timer->irq_enabled = 0;
94 } 94 }
95 return 0; 95 return 0;
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 63e0f12be7c9..72adbbe975d5 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
1154} 1154}
1155 1155
1156/** 1156/**
1157 * __bfq_deactivate_entity - deactivate an entity from its service tree. 1157 * __bfq_deactivate_entity - update sched_data and service trees for
1158 * @entity: the entity to deactivate. 1158 * entity, so as to represent entity as inactive
1159 * @entity: the entity being deactivated.
1159 * @ins_into_idle_tree: if false, the entity will not be put into the 1160 * @ins_into_idle_tree: if false, the entity will not be put into the
1160 * idle tree. 1161 * idle tree.
1161 * 1162 *
1162 * Deactivates an entity, independently of its previous state. Must 1163 * If necessary and allowed, puts entity into the idle tree. NOTE:
1163 * be invoked only if entity is on a service tree. Extracts the entity 1164 * entity may be on no tree if in service.
1164 * from that tree, and if necessary and allowed, puts it into the idle
1165 * tree.
1166 */ 1165 */
1167bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) 1166bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1168{ 1167{
diff --git a/block/blk-core.c b/block/blk-core.c
index c78042975737..6b78ec56a4f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
462 kblockd_schedule_work(&q->timeout_work); 462 kblockd_schedule_work(&q->timeout_work);
463} 463}
464 464
465static void blk_timeout_work(struct work_struct *work)
466{
467}
468
465/** 469/**
466 * blk_alloc_queue_node - allocate a request queue 470 * blk_alloc_queue_node - allocate a request queue
467 * @gfp_mask: memory allocation flags 471 * @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
505 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, 509 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
506 laptop_mode_timer_fn, 0); 510 laptop_mode_timer_fn, 0);
507 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); 511 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
508 INIT_WORK(&q->timeout_work, NULL); 512 INIT_WORK(&q->timeout_work, blk_timeout_work);
509 INIT_LIST_HEAD(&q->icq_list); 513 INIT_LIST_HEAD(&q->icq_list);
510#ifdef CONFIG_BLK_CGROUP 514#ifdef CONFIG_BLK_CGROUP
511 INIT_LIST_HEAD(&q->blkg_list); 515 INIT_LIST_HEAD(&q->blkg_list);
@@ -661,7 +665,6 @@ no_merge:
661 * blk_attempt_plug_merge - try to merge with %current's plugged list 665 * blk_attempt_plug_merge - try to merge with %current's plugged list
662 * @q: request_queue new bio is being queued at 666 * @q: request_queue new bio is being queued at
663 * @bio: new bio being queued 667 * @bio: new bio being queued
664 * @request_count: out parameter for number of traversed plugged requests
665 * @same_queue_rq: pointer to &struct request that gets filled in when 668 * @same_queue_rq: pointer to &struct request that gets filled in when
666 * another request associated with @q is found on the plug list 669 * another request associated with @q is found on the plug list
667 * (optional, may be %NULL) 670 * (optional, may be %NULL)
@@ -1683,6 +1686,15 @@ EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1683 * @plug: The &struct blk_plug that needs to be initialized 1686 * @plug: The &struct blk_plug that needs to be initialized
1684 * 1687 *
1685 * Description: 1688 * Description:
1689 * blk_start_plug() indicates to the block layer an intent by the caller
1690 * to submit multiple I/O requests in a batch. The block layer may use
1691 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1692 * is called. However, the block layer may choose to submit requests
1693 * before a call to blk_finish_plug() if the number of queued I/Os
1694 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1695 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1696 * the task schedules (see below).
1697 *
1686 * Tracking blk_plug inside the task_struct will help with auto-flushing the 1698 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1687 * pending I/O should the task end up blocking between blk_start_plug() and 1699 * pending I/O should the task end up blocking between blk_start_plug() and
1688 * blk_finish_plug(). This is important from a performance perspective, but 1700 * blk_finish_plug(). This is important from a performance perspective, but
@@ -1765,6 +1777,16 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1765 blk_mq_flush_plug_list(plug, from_schedule); 1777 blk_mq_flush_plug_list(plug, from_schedule);
1766} 1778}
1767 1779
1780/**
1781 * blk_finish_plug - mark the end of a batch of submitted I/O
1782 * @plug: The &struct blk_plug passed to blk_start_plug()
1783 *
1784 * Description:
1785 * Indicate that a batch of I/O submissions is complete. This function
1786 * must be paired with an initial call to blk_start_plug(). The intent
1787 * is to allow the block layer to optimize I/O submission. See the
1788 * documentation for blk_start_plug() for more information.
1789 */
1768void blk_finish_plug(struct blk_plug *plug) 1790void blk_finish_plug(struct blk_plug *plug)
1769{ 1791{
1770 if (plug != current->plug) 1792 if (plug != current->plug)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index a3fc7191c694..6e0f2d97fc6d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
335 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error); 335 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
336 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); 336 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
337 337
338 blk_mq_run_hw_queue(hctx, true); 338 blk_mq_sched_restart(hctx);
339} 339}
340 340
341/** 341/**
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index fc714ef402a6..2620baa1f699 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -72,6 +72,7 @@
72#include <linux/sched/loadavg.h> 72#include <linux/sched/loadavg.h>
73#include <linux/sched/signal.h> 73#include <linux/sched/signal.h>
74#include <trace/events/block.h> 74#include <trace/events/block.h>
75#include <linux/blk-mq.h>
75#include "blk-rq-qos.h" 76#include "blk-rq-qos.h"
76#include "blk-stat.h" 77#include "blk-stat.h"
77 78
@@ -591,6 +592,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
591 u64 now = ktime_to_ns(ktime_get()); 592 u64 now = ktime_to_ns(ktime_get());
592 bool issue_as_root = bio_issue_as_root_blkg(bio); 593 bool issue_as_root = bio_issue_as_root_blkg(bio);
593 bool enabled = false; 594 bool enabled = false;
595 int inflight = 0;
594 596
595 blkg = bio->bi_blkg; 597 blkg = bio->bi_blkg;
596 if (!blkg || !bio_flagged(bio, BIO_TRACKED)) 598 if (!blkg || !bio_flagged(bio, BIO_TRACKED))
@@ -601,6 +603,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
601 return; 603 return;
602 604
603 enabled = blk_iolatency_enabled(iolat->blkiolat); 605 enabled = blk_iolatency_enabled(iolat->blkiolat);
606 if (!enabled)
607 return;
608
604 while (blkg && blkg->parent) { 609 while (blkg && blkg->parent) {
605 iolat = blkg_to_lat(blkg); 610 iolat = blkg_to_lat(blkg);
606 if (!iolat) { 611 if (!iolat) {
@@ -609,8 +614,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
609 } 614 }
610 rqw = &iolat->rq_wait; 615 rqw = &iolat->rq_wait;
611 616
612 atomic_dec(&rqw->inflight); 617 inflight = atomic_dec_return(&rqw->inflight);
613 if (!enabled || iolat->min_lat_nsec == 0) 618 WARN_ON_ONCE(inflight < 0);
619 if (iolat->min_lat_nsec == 0)
614 goto next; 620 goto next;
615 iolatency_record_time(iolat, &bio->bi_issue, now, 621 iolatency_record_time(iolat, &bio->bi_issue, now,
616 issue_as_root); 622 issue_as_root);
@@ -754,10 +760,13 @@ int blk_iolatency_init(struct request_queue *q)
754 return 0; 760 return 0;
755} 761}
756 762
757static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) 763/*
764 * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
765 * return 0.
766 */
767static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
758{ 768{
759 struct iolatency_grp *iolat = blkg_to_lat(blkg); 769 struct iolatency_grp *iolat = blkg_to_lat(blkg);
760 struct blk_iolatency *blkiolat = iolat->blkiolat;
761 u64 oldval = iolat->min_lat_nsec; 770 u64 oldval = iolat->min_lat_nsec;
762 771
763 iolat->min_lat_nsec = val; 772 iolat->min_lat_nsec = val;
@@ -766,9 +775,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
766 BLKIOLATENCY_MAX_WIN_SIZE); 775 BLKIOLATENCY_MAX_WIN_SIZE);
767 776
768 if (!oldval && val) 777 if (!oldval && val)
769 atomic_inc(&blkiolat->enabled); 778 return 1;
770 if (oldval && !val) 779 if (oldval && !val)
771 atomic_dec(&blkiolat->enabled); 780 return -1;
781 return 0;
772} 782}
773 783
774static void iolatency_clear_scaling(struct blkcg_gq *blkg) 784static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -800,6 +810,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
800 u64 lat_val = 0; 810 u64 lat_val = 0;
801 u64 oldval; 811 u64 oldval;
802 int ret; 812 int ret;
813 int enable = 0;
803 814
804 ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); 815 ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
805 if (ret) 816 if (ret)
@@ -834,7 +845,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
834 blkg = ctx.blkg; 845 blkg = ctx.blkg;
835 oldval = iolat->min_lat_nsec; 846 oldval = iolat->min_lat_nsec;
836 847
837 iolatency_set_min_lat_nsec(blkg, lat_val); 848 enable = iolatency_set_min_lat_nsec(blkg, lat_val);
849 if (enable) {
850 WARN_ON_ONCE(!blk_get_queue(blkg->q));
851 blkg_get(blkg);
852 }
853
838 if (oldval != iolat->min_lat_nsec) { 854 if (oldval != iolat->min_lat_nsec) {
839 iolatency_clear_scaling(blkg); 855 iolatency_clear_scaling(blkg);
840 } 856 }
@@ -842,6 +858,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
842 ret = 0; 858 ret = 0;
843out: 859out:
844 blkg_conf_finish(&ctx); 860 blkg_conf_finish(&ctx);
861 if (ret == 0 && enable) {
862 struct iolatency_grp *tmp = blkg_to_lat(blkg);
863 struct blk_iolatency *blkiolat = tmp->blkiolat;
864
865 blk_mq_freeze_queue(blkg->q);
866
867 if (enable == 1)
868 atomic_inc(&blkiolat->enabled);
869 else if (enable == -1)
870 atomic_dec(&blkiolat->enabled);
871 else
872 WARN_ON_ONCE(1);
873
874 blk_mq_unfreeze_queue(blkg->q);
875
876 blkg_put(blkg);
877 blk_put_queue(blkg->q);
878 }
845 return ret ?: nbytes; 879 return ret ?: nbytes;
846} 880}
847 881
@@ -977,8 +1011,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
977{ 1011{
978 struct iolatency_grp *iolat = pd_to_lat(pd); 1012 struct iolatency_grp *iolat = pd_to_lat(pd);
979 struct blkcg_gq *blkg = lat_to_blkg(iolat); 1013 struct blkcg_gq *blkg = lat_to_blkg(iolat);
1014 struct blk_iolatency *blkiolat = iolat->blkiolat;
1015 int ret;
980 1016
981 iolatency_set_min_lat_nsec(blkg, 0); 1017 ret = iolatency_set_min_lat_nsec(blkg, 0);
1018 if (ret == 1)
1019 atomic_inc(&blkiolat->enabled);
1020 if (ret == -1)
1021 atomic_dec(&blkiolat->enabled);
982 iolatency_clear_scaling(blkg); 1022 iolatency_clear_scaling(blkg);
983} 1023}
984 1024
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c
index fb2c82c351e4..038cb627c868 100644
--- a/block/blk-mq-debugfs-zoned.c
+++ b/block/blk-mq-debugfs-zoned.c
@@ -1,8 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
4 *
5 * This file is released under the GPL.
6 */ 4 */
7 5
8#include <linux/blkdev.h> 6#include <linux/blkdev.h>
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 90d68760af08..7921573aebbc 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -308,8 +308,9 @@ static const char *const cmd_flag_name[] = {
308 CMD_FLAG_NAME(PREFLUSH), 308 CMD_FLAG_NAME(PREFLUSH),
309 CMD_FLAG_NAME(RAHEAD), 309 CMD_FLAG_NAME(RAHEAD),
310 CMD_FLAG_NAME(BACKGROUND), 310 CMD_FLAG_NAME(BACKGROUND),
311 CMD_FLAG_NAME(NOUNMAP),
312 CMD_FLAG_NAME(NOWAIT), 311 CMD_FLAG_NAME(NOWAIT),
312 CMD_FLAG_NAME(NOUNMAP),
313 CMD_FLAG_NAME(HIPRI),
313}; 314};
314#undef CMD_FLAG_NAME 315#undef CMD_FLAG_NAME
315 316
@@ -838,6 +839,9 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
838static bool debugfs_create_files(struct dentry *parent, void *data, 839static bool debugfs_create_files(struct dentry *parent, void *data,
839 const struct blk_mq_debugfs_attr *attr) 840 const struct blk_mq_debugfs_attr *attr)
840{ 841{
842 if (IS_ERR_OR_NULL(parent))
843 return false;
844
841 d_inode(parent)->i_private = data; 845 d_inode(parent)->i_private = data;
842 846
843 for (; attr->name; attr++) { 847 for (; attr->name; attr++) {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ba37b9e15e9..9437a5eb07cf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -737,12 +737,20 @@ static void blk_mq_requeue_work(struct work_struct *work)
737 spin_unlock_irq(&q->requeue_lock); 737 spin_unlock_irq(&q->requeue_lock);
738 738
739 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 739 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
740 if (!(rq->rq_flags & RQF_SOFTBARRIER)) 740 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
741 continue; 741 continue;
742 742
743 rq->rq_flags &= ~RQF_SOFTBARRIER; 743 rq->rq_flags &= ~RQF_SOFTBARRIER;
744 list_del_init(&rq->queuelist); 744 list_del_init(&rq->queuelist);
745 blk_mq_sched_insert_request(rq, true, false, false); 745 /*
746 * If RQF_DONTPREP, rq has contained some driver specific
747 * data, so insert it to hctx dispatch list to avoid any
748 * merge.
749 */
750 if (rq->rq_flags & RQF_DONTPREP)
751 blk_mq_request_bypass_insert(rq, false);
752 else
753 blk_mq_sched_insert_request(rq, true, false, false);
746 } 754 }
747 755
748 while (!list_empty(&rq_list)) { 756 while (!list_empty(&rq_list)) {
@@ -1906,7 +1914,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1906{ 1914{
1907 const int is_sync = op_is_sync(bio->bi_opf); 1915 const int is_sync = op_is_sync(bio->bi_opf);
1908 const int is_flush_fua = op_is_flush(bio->bi_opf); 1916 const int is_flush_fua = op_is_flush(bio->bi_opf);
1909 struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf }; 1917 struct blk_mq_alloc_data data = { .flags = 0};
1910 struct request *rq; 1918 struct request *rq;
1911 struct blk_plug *plug; 1919 struct blk_plug *plug;
1912 struct request *same_queue_rq = NULL; 1920 struct request *same_queue_rq = NULL;
@@ -1928,6 +1936,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1928 1936
1929 rq_qos_throttle(q, bio); 1937 rq_qos_throttle(q, bio);
1930 1938
1939 data.cmd_flags = bio->bi_opf;
1931 rq = blk_mq_get_request(q, bio, &data); 1940 rq = blk_mq_get_request(q, bio, &data);
1932 if (unlikely(!rq)) { 1941 if (unlikely(!rq)) {
1933 rq_qos_cleanup(q, bio); 1942 rq_qos_cleanup(q, bio);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d943d46b0785..d0b3dd54ef8d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -36,7 +36,6 @@ struct blk_mq_ctx {
36 struct kobject kobj; 36 struct kobject kobj;
37} ____cacheline_aligned_in_smp; 37} ____cacheline_aligned_in_smp;
38 38
39void blk_mq_freeze_queue(struct request_queue *q);
40void blk_mq_free_queue(struct request_queue *q); 39void blk_mq_free_queue(struct request_queue *q);
41int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); 40int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
42void blk_mq_wake_waiters(struct request_queue *q); 41void blk_mq_wake_waiters(struct request_queue *q);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index f0c56649775f..fd166fbb0f65 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -597,7 +597,7 @@ static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
597 rq->wbt_flags |= bio_to_wbt_flags(rwb, bio); 597 rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
598} 598}
599 599
600void wbt_issue(struct rq_qos *rqos, struct request *rq) 600static void wbt_issue(struct rq_qos *rqos, struct request *rq)
601{ 601{
602 struct rq_wb *rwb = RQWB(rqos); 602 struct rq_wb *rwb = RQWB(rqos);
603 603
@@ -617,7 +617,7 @@ void wbt_issue(struct rq_qos *rqos, struct request *rq)
617 } 617 }
618} 618}
619 619
620void wbt_requeue(struct rq_qos *rqos, struct request *rq) 620static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
621{ 621{
622 struct rq_wb *rwb = RQWB(rqos); 622 struct rq_wb *rwb = RQWB(rqos);
623 if (!rwb_enabled(rwb)) 623 if (!rwb_enabled(rwb))
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 6651e713c45d..5564e73266a6 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
539 ictx = skcipher_instance_ctx(inst); 539 ictx = skcipher_instance_ctx(inst);
540 540
541 /* Stream cipher, e.g. "xchacha12" */ 541 /* Stream cipher, e.g. "xchacha12" */
542 crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
543 skcipher_crypto_instance(inst));
542 err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, 544 err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
543 0, crypto_requires_sync(algt->type, 545 0, crypto_requires_sync(algt->type,
544 algt->mask)); 546 algt->mask));
@@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
547 streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); 549 streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
548 550
549 /* Block cipher, e.g. "aes" */ 551 /* Block cipher, e.g. "aes" */
552 crypto_set_spawn(&ictx->blockcipher_spawn,
553 skcipher_crypto_instance(inst));
550 err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, 554 err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
551 CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); 555 CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
552 if (err) 556 if (err)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 17eb09d222ff..ec78a04eb136 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
122 122
123int af_alg_release(struct socket *sock) 123int af_alg_release(struct socket *sock)
124{ 124{
125 if (sock->sk) 125 if (sock->sk) {
126 sock_put(sock->sk); 126 sock_put(sock->sk);
127 sock->sk = NULL;
128 }
127 return 0; 129 return 0;
128} 130}
129EXPORT_SYMBOL_GPL(af_alg_release); 131EXPORT_SYMBOL_GPL(af_alg_release);
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 37f54d1b2f66..4be293a4b5f0 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
58 return -EINVAL; 58 return -EINVAL;
59 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 59 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
60 return -EINVAL; 60 return -EINVAL;
61 if (RTA_PAYLOAD(rta) < sizeof(*param)) 61
62 /*
63 * RTA_OK() didn't align the rtattr's payload when validating that it
64 * fits in the buffer. Yet, the keys should start on the next 4-byte
65 * aligned boundary. To avoid confusion, require that the rtattr
66 * payload be exactly the param struct, which has a 4-byte aligned size.
67 */
68 if (RTA_PAYLOAD(rta) != sizeof(*param))
62 return -EINVAL; 69 return -EINVAL;
70 BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
63 71
64 param = RTA_DATA(rta); 72 param = RTA_DATA(rta);
65 keys->enckeylen = be32_to_cpu(param->enckeylen); 73 keys->enckeylen = be32_to_cpu(param->enckeylen);
66 74
67 key += RTA_ALIGN(rta->rta_len); 75 key += rta->rta_len;
68 keylen -= RTA_ALIGN(rta->rta_len); 76 keylen -= rta->rta_len;
69 77
70 if (keylen < keys->enckeylen) 78 if (keylen < keys->enckeylen)
71 return -EINVAL; 79 return -EINVAL;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 80a25cc04aec..4741fe89ba2c 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
279 struct aead_request *req = areq->data; 279 struct aead_request *req = areq->data;
280 280
281 err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); 281 err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
282 aead_request_complete(req, err); 282 authenc_esn_request_complete(req, err);
283} 283}
284 284
285static int crypto_authenc_esn_decrypt(struct aead_request *req) 285static int crypto_authenc_esn_decrypt(struct aead_request *req)
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9a5c60f08aad..c0cf87ae7ef6 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
100 100
101 for (i = 0; i <= 63; i++) { 101 for (i = 0; i <= 63; i++) {
102 102
103 ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); 103 ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
104 104
105 ss2 = ss1 ^ rol32(a, 12); 105 ss2 = ss1 ^ rol32(a, 12);
106 106
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7b65a807b3dd..90ff0a47c12e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -10,6 +10,7 @@ menuconfig ACPI
10 bool "ACPI (Advanced Configuration and Power Interface) Support" 10 bool "ACPI (Advanced Configuration and Power Interface) Support"
11 depends on ARCH_SUPPORTS_ACPI 11 depends on ARCH_SUPPORTS_ACPI
12 select PNP 12 select PNP
13 select NLS
13 default y if X86 14 default y if X86
14 help 15 help
15 Advanced Configuration and Power Interface (ACPI) support for 16 Advanced Configuration and Power Interface (ACPI) support for
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 7c6afc111d76..bb857421c2e8 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -41,7 +41,8 @@ acpi-y += ec.o
41acpi-$(CONFIG_ACPI_DOCK) += dock.o 41acpi-$(CONFIG_ACPI_DOCK) += dock.o
42acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o 42acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
43obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o 43obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
44acpi-y += acpi_lpss.o acpi_apd.o 44acpi-$(CONFIG_PCI) += acpi_lpss.o
45acpi-y += acpi_apd.o
45acpi-y += acpi_platform.o 46acpi-y += acpi_platform.o
46acpi-y += acpi_pnp.o 47acpi-y += acpi_pnp.o
47acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o 48acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index fdd90ffceb85..e48894e002ba 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -876,7 +876,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
876 return (resv == its->its_count) ? resv : -ENODEV; 876 return (resv == its->its_count) ? resv : -ENODEV;
877} 877}
878#else 878#else
879static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev); 879static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
880{ return NULL; } 880{ return NULL; }
881static inline int iort_add_device_replay(const struct iommu_ops *ops, 881static inline int iort_add_device_replay(const struct iommu_ops *ops,
882 struct device *dev) 882 struct device *dev)
@@ -952,9 +952,10 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
952{ 952{
953 struct acpi_iort_node *node; 953 struct acpi_iort_node *node;
954 struct acpi_iort_root_complex *rc; 954 struct acpi_iort_root_complex *rc;
955 struct pci_bus *pbus = to_pci_dev(dev)->bus;
955 956
956 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 957 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
957 iort_match_node_callback, dev); 958 iort_match_node_callback, &pbus->dev);
958 if (!node || node->revision < 1) 959 if (!node || node->revision < 1)
959 return -ENODEV; 960 return -ENODEV;
960 961
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 99d820a693a8..147f6c7ea59c 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1029,6 +1029,9 @@ void __init acpi_early_init(void)
1029 1029
1030 acpi_permanent_mmap = true; 1030 acpi_permanent_mmap = true;
1031 1031
1032 /* Initialize debug output. Linux does not use ACPICA defaults */
1033 acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
1034
1032#ifdef CONFIG_X86 1035#ifdef CONFIG_X86
1033 /* 1036 /*
1034 * If the machine falls into the DMI check table, 1037 * If the machine falls into the DMI check table,
@@ -1054,18 +1057,6 @@ void __init acpi_early_init(void)
1054 goto error0; 1057 goto error0;
1055 } 1058 }
1056 1059
1057 /*
1058 * ACPI 2.0 requires the EC driver to be loaded and work before
1059 * the EC device is found in the namespace (i.e. before
1060 * acpi_load_tables() is called).
1061 *
1062 * This is accomplished by looking for the ECDT table, and getting
1063 * the EC parameters out of that.
1064 *
1065 * Ignore the result. Not having an ECDT is not fatal.
1066 */
1067 status = acpi_ec_ecdt_probe();
1068
1069#ifdef CONFIG_X86 1060#ifdef CONFIG_X86
1070 if (!acpi_ioapic) { 1061 if (!acpi_ioapic) {
1071 /* compatible (0) means level (3) */ 1062 /* compatible (0) means level (3) */
@@ -1142,6 +1133,18 @@ static int __init acpi_bus_init(void)
1142 goto error1; 1133 goto error1;
1143 } 1134 }
1144 1135
1136 /*
1137 * ACPI 2.0 requires the EC driver to be loaded and work before the EC
1138 * device is found in the namespace.
1139 *
1140 * This is accomplished by looking for the ECDT table and getting the EC
1141 * parameters out of that.
1142 *
1143 * Do that before calling acpi_initialize_objects() which may trigger EC
1144 * address space accesses.
1145 */
1146 acpi_ec_ecdt_probe();
1147
1145 status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); 1148 status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
1146 if (ACPI_FAILURE(status)) { 1149 if (ACPI_FAILURE(status)) {
1147 printk(KERN_ERR PREFIX 1150 printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7e6952edb5b0..6a9e1fb8913a 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -81,7 +81,11 @@ void acpi_debugfs_init(void);
81#else 81#else
82static inline void acpi_debugfs_init(void) { return; } 82static inline void acpi_debugfs_init(void) { return; }
83#endif 83#endif
84#ifdef CONFIG_PCI
84void acpi_lpss_init(void); 85void acpi_lpss_init(void);
86#else
87static inline void acpi_lpss_init(void) {}
88#endif
85 89
86void acpi_apd_init(void); 90void acpi_apd_init(void);
87 91
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 011d3db19c80..e18ade5d74e9 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -26,7 +26,6 @@
26#include <acpi/nfit.h> 26#include <acpi/nfit.h>
27#include "intel.h" 27#include "intel.h"
28#include "nfit.h" 28#include "nfit.h"
29#include "intel.h"
30 29
31/* 30/*
32 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is 31 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
78} 77}
79EXPORT_SYMBOL(to_nfit_uuid); 78EXPORT_SYMBOL(to_nfit_uuid);
80 79
81static struct acpi_nfit_desc *to_acpi_nfit_desc(
82 struct nvdimm_bus_descriptor *nd_desc)
83{
84 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
85}
86
87static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) 80static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
88{ 81{
89 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 82 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@@ -416,10 +409,36 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
416 return true; 409 return true;
417} 410}
418 411
412static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
413 struct nd_cmd_pkg *call_pkg)
414{
415 if (call_pkg) {
416 int i;
417
418 if (nfit_mem->family != call_pkg->nd_family)
419 return -ENOTTY;
420
421 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
422 if (call_pkg->nd_reserved2[i])
423 return -EINVAL;
424 return call_pkg->nd_command;
425 }
426
427 /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
428 if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
429 return cmd;
430
431 /*
432 * Force function number validation to fail since 0 is never
433 * published as a valid function in dsm_mask.
434 */
435 return 0;
436}
437
419int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, 438int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
420 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) 439 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
421{ 440{
422 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 441 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
423 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 442 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
424 union acpi_object in_obj, in_buf, *out_obj; 443 union acpi_object in_obj, in_buf, *out_obj;
425 const struct nd_cmd_desc *desc = NULL; 444 const struct nd_cmd_desc *desc = NULL;
@@ -429,30 +448,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
429 unsigned long cmd_mask, dsm_mask; 448 unsigned long cmd_mask, dsm_mask;
430 u32 offset, fw_status = 0; 449 u32 offset, fw_status = 0;
431 acpi_handle handle; 450 acpi_handle handle;
432 unsigned int func;
433 const guid_t *guid; 451 const guid_t *guid;
434 int rc, i; 452 int func, rc, i;
435 453
436 if (cmd_rc) 454 if (cmd_rc)
437 *cmd_rc = -EINVAL; 455 *cmd_rc = -EINVAL;
438 func = cmd;
439 if (cmd == ND_CMD_CALL) {
440 call_pkg = buf;
441 func = call_pkg->nd_command;
442
443 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
444 if (call_pkg->nd_reserved2[i])
445 return -EINVAL;
446 }
447 456
448 if (nvdimm) { 457 if (nvdimm) {
449 struct acpi_device *adev = nfit_mem->adev; 458 struct acpi_device *adev = nfit_mem->adev;
450 459
451 if (!adev) 460 if (!adev)
452 return -ENOTTY; 461 return -ENOTTY;
453 if (call_pkg && nfit_mem->family != call_pkg->nd_family)
454 return -ENOTTY;
455 462
463 if (cmd == ND_CMD_CALL)
464 call_pkg = buf;
465 func = cmd_to_func(nfit_mem, cmd, call_pkg);
466 if (func < 0)
467 return func;
456 dimm_name = nvdimm_name(nvdimm); 468 dimm_name = nvdimm_name(nvdimm);
457 cmd_name = nvdimm_cmd_name(cmd); 469 cmd_name = nvdimm_cmd_name(cmd);
458 cmd_mask = nvdimm_cmd_mask(nvdimm); 470 cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -463,6 +475,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
463 } else { 475 } else {
464 struct acpi_device *adev = to_acpi_dev(acpi_desc); 476 struct acpi_device *adev = to_acpi_dev(acpi_desc);
465 477
478 func = cmd;
466 cmd_name = nvdimm_bus_cmd_name(cmd); 479 cmd_name = nvdimm_bus_cmd_name(cmd);
467 cmd_mask = nd_desc->cmd_mask; 480 cmd_mask = nd_desc->cmd_mask;
468 dsm_mask = cmd_mask; 481 dsm_mask = cmd_mask;
@@ -477,7 +490,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
477 if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) 490 if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
478 return -ENOTTY; 491 return -ENOTTY;
479 492
480 if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) 493 /*
494 * Check for a valid command. For ND_CMD_CALL, we also have to
495 * make sure that the DSM function is supported.
496 */
497 if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
498 return -ENOTTY;
499 else if (!test_bit(cmd, &cmd_mask))
481 return -ENOTTY; 500 return -ENOTTY;
482 501
483 in_obj.type = ACPI_TYPE_PACKAGE; 502 in_obj.type = ACPI_TYPE_PACKAGE;
@@ -721,6 +740,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
721 struct acpi_nfit_memory_map *memdev; 740 struct acpi_nfit_memory_map *memdev;
722 struct acpi_nfit_desc *acpi_desc; 741 struct acpi_nfit_desc *acpi_desc;
723 struct nfit_mem *nfit_mem; 742 struct nfit_mem *nfit_mem;
743 u16 physical_id;
724 744
725 mutex_lock(&acpi_desc_lock); 745 mutex_lock(&acpi_desc_lock);
726 list_for_each_entry(acpi_desc, &acpi_descs, list) { 746 list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -728,10 +748,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
728 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 748 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
729 memdev = __to_nfit_memdev(nfit_mem); 749 memdev = __to_nfit_memdev(nfit_mem);
730 if (memdev->device_handle == device_handle) { 750 if (memdev->device_handle == device_handle) {
751 *flags = memdev->flags;
752 physical_id = memdev->physical_id;
731 mutex_unlock(&acpi_desc->init_mutex); 753 mutex_unlock(&acpi_desc->init_mutex);
732 mutex_unlock(&acpi_desc_lock); 754 mutex_unlock(&acpi_desc_lock);
733 *flags = memdev->flags; 755 return physical_id;
734 return memdev->physical_id;
735 } 756 }
736 } 757 }
737 mutex_unlock(&acpi_desc->init_mutex); 758 mutex_unlock(&acpi_desc->init_mutex);
@@ -1872,6 +1893,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1872 return 0; 1893 return 0;
1873 } 1894 }
1874 1895
1896 /*
1897 * Function 0 is the command interrogation function, don't
1898 * export it to potential userspace use, and enable it to be
1899 * used as an error value in acpi_nfit_ctl().
1900 */
1901 dsm_mask &= ~1UL;
1902
1875 guid = to_nfit_uuid(nfit_mem->family); 1903 guid = to_nfit_uuid(nfit_mem->family);
1876 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1904 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1877 if (acpi_check_dsm(adev_dimm->handle, guid, 1905 if (acpi_check_dsm(adev_dimm->handle, guid,
@@ -2047,11 +2075,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
2047 if (!nvdimm) 2075 if (!nvdimm)
2048 continue; 2076 continue;
2049 2077
2050 rc = nvdimm_security_setup_events(nvdimm);
2051 if (rc < 0)
2052 dev_warn(acpi_desc->dev,
2053 "security event setup failed: %d\n", rc);
2054
2055 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); 2078 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
2056 if (nfit_kernfs) 2079 if (nfit_kernfs)
2057 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, 2080 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
@@ -2231,7 +2254,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
2231 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2254 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
2232 if (!nd_set) 2255 if (!nd_set)
2233 return -ENOMEM; 2256 return -ENOMEM;
2234 ndr_desc->nd_set = nd_set;
2235 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); 2257 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
2236 2258
2237 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 2259 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
@@ -3367,7 +3389,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init);
3367 3389
3368static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3390static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3369{ 3391{
3370 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3392 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3371 struct device *dev = acpi_desc->dev; 3393 struct device *dev = acpi_desc->dev;
3372 3394
3373 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3395 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
@@ -3384,7 +3406,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3384static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3406static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3385 struct nvdimm *nvdimm, unsigned int cmd) 3407 struct nvdimm *nvdimm, unsigned int cmd)
3386{ 3408{
3387 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3409 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3388 3410
3389 if (nvdimm) 3411 if (nvdimm)
3390 return 0; 3412 return 0;
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 850b2927b4e7..f70de71f79d6 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
146 146
147static void nvdimm_invalidate_cache(void); 147static void nvdimm_invalidate_cache(void);
148 148
149static int intel_security_unlock(struct nvdimm *nvdimm, 149static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
150 const struct nvdimm_key_data *key_data) 150 const struct nvdimm_key_data *key_data)
151{ 151{
152 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 152 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm,
227 return 0; 227 return 0;
228} 228}
229 229
230static int intel_security_erase(struct nvdimm *nvdimm, 230static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
231 const struct nvdimm_key_data *key, 231 const struct nvdimm_key_data *key,
232 enum nvdimm_passphrase_type ptype) 232 enum nvdimm_passphrase_type ptype)
233{ 233{
@@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm,
276 return 0; 276 return 0;
277} 277}
278 278
279static int intel_security_query_overwrite(struct nvdimm *nvdimm) 279static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
280{ 280{
281 int rc; 281 int rc;
282 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 282 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm)
313 return 0; 313 return 0;
314} 314}
315 315
316static int intel_security_overwrite(struct nvdimm *nvdimm, 316static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
317 const struct nvdimm_key_data *nkey) 317 const struct nvdimm_key_data *nkey)
318{ 318{
319 int rc; 319 int rc;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 274699463b4f..7bbbf8256a41 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -146,9 +146,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
146 { 146 {
147 struct acpi_srat_mem_affinity *p = 147 struct acpi_srat_mem_affinity *p =
148 (struct acpi_srat_mem_affinity *)header; 148 (struct acpi_srat_mem_affinity *)header;
149 pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", 149 pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
150 (unsigned long)p->base_address, 150 (unsigned long long)p->base_address,
151 (unsigned long)p->length, 151 (unsigned long long)p->length,
152 p->proximity_domain, 152 p->proximity_domain,
153 (p->flags & ACPI_SRAT_MEM_ENABLED) ? 153 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
154 "enabled" : "disabled", 154 "enabled" : "disabled",
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 2579675b7082..e7c0006e6602 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -20,8 +20,11 @@
20#define GPI1_LDO_ON (3 << 0) 20#define GPI1_LDO_ON (3 << 0)
21#define GPI1_LDO_OFF (4 << 0) 21#define GPI1_LDO_OFF (4 << 0)
22 22
23#define AXP288_ADC_TS_PIN_GPADC 0xf2 23#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
24#define AXP288_ADC_TS_PIN_ON 0xf3 24#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
25#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
26#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
27#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
25 28
26static struct pmic_table power_table[] = { 29static struct pmic_table power_table[] = {
27 { 30 {
@@ -212,22 +215,44 @@ out:
212 */ 215 */
213static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) 216static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
214{ 217{
218 int ret, adc_ts_pin_ctrl;
215 u8 buf[2]; 219 u8 buf[2];
216 int ret;
217 220
218 ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, 221 /*
219 AXP288_ADC_TS_PIN_GPADC); 222 * The current-source used for the battery temp-sensor (TS) is shared
223 * with the GPADC. For proper fuel-gauge and charger operation the TS
224 * current-source needs to be permanently on. But to read the GPADC we
225 * need to temporary switch the TS current-source to ondemand, so that
226 * the GPADC can use it, otherwise we will always read an all 0 value.
227 *
228 * Note that the switching from on to on-ondemand is not necessary
229 * when the TS current-source is off (this happens on devices which
230 * do not use the TS-pin).
231 */
232 ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
220 if (ret) 233 if (ret)
221 return ret; 234 return ret;
222 235
223 /* After switching to the GPADC pin give things some time to settle */ 236 if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
224 usleep_range(6000, 10000); 237 ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
238 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
239 AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
240 if (ret)
241 return ret;
242
243 /* Wait a bit after switching the current-source */
244 usleep_range(6000, 10000);
245 }
225 246
226 ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); 247 ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
227 if (ret == 0) 248 if (ret == 0)
228 ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); 249 ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
229 250
230 regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON); 251 if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
252 regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
253 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
254 AXP288_ADC_TS_CURRENT_ON);
255 }
231 256
232 return ret; 257 return ret;
233} 258}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1b475bc1ae16..665e93ca0b40 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list)
131 } 131 }
132} 132}
133 133
134static bool acpi_power_resource_is_dup(union acpi_object *package,
135 unsigned int start, unsigned int i)
136{
137 acpi_handle rhandle, dup;
138 unsigned int j;
139
140 /* The caller is expected to check the package element types */
141 rhandle = package->package.elements[i].reference.handle;
142 for (j = start; j < i; j++) {
143 dup = package->package.elements[j].reference.handle;
144 if (dup == rhandle)
145 return true;
146 }
147
148 return false;
149}
150
134int acpi_extract_power_resources(union acpi_object *package, unsigned int start, 151int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
135 struct list_head *list) 152 struct list_head *list)
136{ 153{
@@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
150 err = -ENODEV; 167 err = -ENODEV;
151 break; 168 break;
152 } 169 }
170
171 /* Some ACPI tables contain duplicate power resource references */
172 if (acpi_power_resource_is_dup(package, start, i))
173 continue;
174
153 err = acpi_add_power_resource(rhandle); 175 err = acpi_add_power_resource(rhandle);
154 if (err) 176 if (err)
155 break; 177 break;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cdfc87629efb..4d2b2ad1ee0e 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -5854,9 +5854,10 @@ static int __init init_binder_device(const char *name)
5854static int __init binder_init(void) 5854static int __init binder_init(void)
5855{ 5855{
5856 int ret; 5856 int ret;
5857 char *device_name, *device_names, *device_tmp; 5857 char *device_name, *device_tmp;
5858 struct binder_device *device; 5858 struct binder_device *device;
5859 struct hlist_node *tmp; 5859 struct hlist_node *tmp;
5860 char *device_names = NULL;
5860 5861
5861 ret = binder_alloc_shrinker_init(); 5862 ret = binder_alloc_shrinker_init();
5862 if (ret) 5863 if (ret)
@@ -5898,23 +5899,29 @@ static int __init binder_init(void)
5898 &transaction_log_fops); 5899 &transaction_log_fops);
5899 } 5900 }
5900 5901
5901 /* 5902 if (strcmp(binder_devices_param, "") != 0) {
5902 * Copy the module_parameter string, because we don't want to 5903 /*
5903 * tokenize it in-place. 5904 * Copy the module_parameter string, because we don't want to
5904 */ 5905 * tokenize it in-place.
5905 device_names = kstrdup(binder_devices_param, GFP_KERNEL); 5906 */
5906 if (!device_names) { 5907 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5907 ret = -ENOMEM; 5908 if (!device_names) {
5908 goto err_alloc_device_names_failed; 5909 ret = -ENOMEM;
5909 } 5910 goto err_alloc_device_names_failed;
5911 }
5910 5912
5911 device_tmp = device_names; 5913 device_tmp = device_names;
5912 while ((device_name = strsep(&device_tmp, ","))) { 5914 while ((device_name = strsep(&device_tmp, ","))) {
5913 ret = init_binder_device(device_name); 5915 ret = init_binder_device(device_name);
5914 if (ret) 5916 if (ret)
5915 goto err_init_binder_device_failed; 5917 goto err_init_binder_device_failed;
5918 }
5916 } 5919 }
5917 5920
5921 ret = init_binderfs();
5922 if (ret)
5923 goto err_init_binder_device_failed;
5924
5918 return ret; 5925 return ret;
5919 5926
5920err_init_binder_device_failed: 5927err_init_binder_device_failed:
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 7fb97f503ef2..045b3e42d98b 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode)
46} 46}
47#endif 47#endif
48 48
49#ifdef CONFIG_ANDROID_BINDERFS
50extern int __init init_binderfs(void);
51#else
52static inline int __init init_binderfs(void)
53{
54 return 0;
55}
56#endif
57
49#endif /* _LINUX_BINDER_INTERNAL_H */ 58#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 7496b10532aa..e773f45d19d9 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -11,6 +11,7 @@
11#include <linux/kdev_t.h> 11#include <linux/kdev_t.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/namei.h>
14#include <linux/magic.h> 15#include <linux/magic.h>
15#include <linux/major.h> 16#include <linux/major.h>
16#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
@@ -20,6 +21,7 @@
20#include <linux/parser.h> 21#include <linux/parser.h>
21#include <linux/radix-tree.h> 22#include <linux/radix-tree.h>
22#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/seq_file.h>
23#include <linux/slab.h> 25#include <linux/slab.h>
24#include <linux/spinlock_types.h> 26#include <linux/spinlock_types.h>
25#include <linux/stddef.h> 27#include <linux/stddef.h>
@@ -30,7 +32,7 @@
30#include <linux/xarray.h> 32#include <linux/xarray.h>
31#include <uapi/asm-generic/errno-base.h> 33#include <uapi/asm-generic/errno-base.h>
32#include <uapi/linux/android/binder.h> 34#include <uapi/linux/android/binder.h>
33#include <uapi/linux/android/binder_ctl.h> 35#include <uapi/linux/android/binderfs.h>
34 36
35#include "binder_internal.h" 37#include "binder_internal.h"
36 38
@@ -39,14 +41,32 @@
39#define INODE_OFFSET 3 41#define INODE_OFFSET 3
40#define INTSTRLEN 21 42#define INTSTRLEN 21
41#define BINDERFS_MAX_MINOR (1U << MINORBITS) 43#define BINDERFS_MAX_MINOR (1U << MINORBITS)
42 44/* Ensure that the initial ipc namespace always has devices available. */
43static struct vfsmount *binderfs_mnt; 45#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
44 46
45static dev_t binderfs_dev; 47static dev_t binderfs_dev;
46static DEFINE_MUTEX(binderfs_minors_mutex); 48static DEFINE_MUTEX(binderfs_minors_mutex);
47static DEFINE_IDA(binderfs_minors); 49static DEFINE_IDA(binderfs_minors);
48 50
49/** 51/**
52 * binderfs_mount_opts - mount options for binderfs
53 * @max: maximum number of allocatable binderfs binder devices
54 */
55struct binderfs_mount_opts {
56 int max;
57};
58
59enum {
60 Opt_max,
61 Opt_err
62};
63
64static const match_table_t tokens = {
65 { Opt_max, "max=%d" },
66 { Opt_err, NULL }
67};
68
69/**
50 * binderfs_info - information about a binderfs mount 70 * binderfs_info - information about a binderfs mount
51 * @ipc_ns: The ipc namespace the binderfs mount belongs to. 71 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
52 * @control_dentry: This records the dentry of this binderfs mount 72 * @control_dentry: This records the dentry of this binderfs mount
@@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors);
55 * created. 75 * created.
56 * @root_gid: gid that needs to be used when a new binder device is 76 * @root_gid: gid that needs to be used when a new binder device is
57 * created. 77 * created.
78 * @mount_opts: The mount options in use.
79 * @device_count: The current number of allocated binder devices.
58 */ 80 */
59struct binderfs_info { 81struct binderfs_info {
60 struct ipc_namespace *ipc_ns; 82 struct ipc_namespace *ipc_ns;
61 struct dentry *control_dentry; 83 struct dentry *control_dentry;
62 kuid_t root_uid; 84 kuid_t root_uid;
63 kgid_t root_gid; 85 kgid_t root_gid;
64 86 struct binderfs_mount_opts mount_opts;
87 int device_count;
65}; 88};
66 89
67static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) 90static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
@@ -84,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode)
84 * @userp: buffer to copy information about new device for userspace to 107 * @userp: buffer to copy information about new device for userspace to
85 * @req: struct binderfs_device as copied from userspace 108 * @req: struct binderfs_device as copied from userspace
86 * 109 *
87 * This function allocated a new binder_device and reserves a new minor 110 * This function allocates a new binder_device and reserves a new minor
88 * number for it. 111 * number for it.
89 * Minor numbers are limited and tracked globally in binderfs_minors. The 112 * Minor numbers are limited and tracked globally in binderfs_minors. The
90 * function will stash a struct binder_device for the specific binder 113 * function will stash a struct binder_device for the specific binder
@@ -100,20 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
100 struct binderfs_device *req) 123 struct binderfs_device *req)
101{ 124{
102 int minor, ret; 125 int minor, ret;
103 struct dentry *dentry, *dup, *root; 126 struct dentry *dentry, *root;
104 struct binder_device *device; 127 struct binder_device *device;
105 size_t name_len = BINDERFS_MAX_NAME + 1;
106 char *name = NULL; 128 char *name = NULL;
129 size_t name_len;
107 struct inode *inode = NULL; 130 struct inode *inode = NULL;
108 struct super_block *sb = ref_inode->i_sb; 131 struct super_block *sb = ref_inode->i_sb;
109 struct binderfs_info *info = sb->s_fs_info; 132 struct binderfs_info *info = sb->s_fs_info;
133#if defined(CONFIG_IPC_NS)
134 bool use_reserve = (info->ipc_ns == &init_ipc_ns);
135#else
136 bool use_reserve = true;
137#endif
110 138
111 /* Reserve new minor number for the new device. */ 139 /* Reserve new minor number for the new device. */
112 mutex_lock(&binderfs_minors_mutex); 140 mutex_lock(&binderfs_minors_mutex);
113 minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); 141 if (++info->device_count <= info->mount_opts.max)
114 mutex_unlock(&binderfs_minors_mutex); 142 minor = ida_alloc_max(&binderfs_minors,
115 if (minor < 0) 143 use_reserve ? BINDERFS_MAX_MINOR :
144 BINDERFS_MAX_MINOR_CAPPED,
145 GFP_KERNEL);
146 else
147 minor = -ENOSPC;
148 if (minor < 0) {
149 --info->device_count;
150 mutex_unlock(&binderfs_minors_mutex);
116 return minor; 151 return minor;
152 }
153 mutex_unlock(&binderfs_minors_mutex);
117 154
118 ret = -ENOMEM; 155 ret = -ENOMEM;
119 device = kzalloc(sizeof(*device), GFP_KERNEL); 156 device = kzalloc(sizeof(*device), GFP_KERNEL);
@@ -132,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
132 inode->i_uid = info->root_uid; 169 inode->i_uid = info->root_uid;
133 inode->i_gid = info->root_gid; 170 inode->i_gid = info->root_gid;
134 171
135 name = kmalloc(name_len, GFP_KERNEL); 172 req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
173 name_len = strlen(req->name);
174 /* Make sure to include terminating NUL byte */
175 name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
136 if (!name) 176 if (!name)
137 goto err; 177 goto err;
138 178
139 strscpy(name, req->name, name_len);
140
141 device->binderfs_inode = inode; 179 device->binderfs_inode = inode;
142 device->context.binder_context_mgr_uid = INVALID_UID; 180 device->context.binder_context_mgr_uid = INVALID_UID;
143 device->context.name = name; 181 device->context.name = name;
@@ -156,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
156 194
157 root = sb->s_root; 195 root = sb->s_root;
158 inode_lock(d_inode(root)); 196 inode_lock(d_inode(root));
159 dentry = d_alloc_name(root, name); 197
160 if (!dentry) { 198 /* look it up */
199 dentry = lookup_one_len(name, root, name_len);
200 if (IS_ERR(dentry)) {
161 inode_unlock(d_inode(root)); 201 inode_unlock(d_inode(root));
162 ret = -ENOMEM; 202 ret = PTR_ERR(dentry);
163 goto err; 203 goto err;
164 } 204 }
165 205
166 /* Verify that the name userspace gave us is not already in use. */ 206 if (d_really_is_positive(dentry)) {
167 dup = d_lookup(root, &dentry->d_name); 207 /* already exists */
168 if (dup) { 208 dput(dentry);
169 if (d_really_is_positive(dup)) { 209 inode_unlock(d_inode(root));
170 dput(dup); 210 ret = -EEXIST;
171 dput(dentry); 211 goto err;
172 inode_unlock(d_inode(root));
173 ret = -EEXIST;
174 goto err;
175 }
176 dput(dup);
177 } 212 }
178 213
179 inode->i_private = device; 214 inode->i_private = device;
180 d_add(dentry, inode); 215 d_instantiate(dentry, inode);
181 fsnotify_create(root->d_inode, dentry); 216 fsnotify_create(root->d_inode, dentry);
182 inode_unlock(d_inode(root)); 217 inode_unlock(d_inode(root));
183 218
@@ -187,6 +222,7 @@ err:
187 kfree(name); 222 kfree(name);
188 kfree(device); 223 kfree(device);
189 mutex_lock(&binderfs_minors_mutex); 224 mutex_lock(&binderfs_minors_mutex);
225 --info->device_count;
190 ida_free(&binderfs_minors, minor); 226 ida_free(&binderfs_minors, minor);
191 mutex_unlock(&binderfs_minors_mutex); 227 mutex_unlock(&binderfs_minors_mutex);
192 iput(inode); 228 iput(inode);
@@ -232,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
232static void binderfs_evict_inode(struct inode *inode) 268static void binderfs_evict_inode(struct inode *inode)
233{ 269{
234 struct binder_device *device = inode->i_private; 270 struct binder_device *device = inode->i_private;
271 struct binderfs_info *info = BINDERFS_I(inode);
235 272
236 clear_inode(inode); 273 clear_inode(inode);
237 274
@@ -239,6 +276,7 @@ static void binderfs_evict_inode(struct inode *inode)
239 return; 276 return;
240 277
241 mutex_lock(&binderfs_minors_mutex); 278 mutex_lock(&binderfs_minors_mutex);
279 --info->device_count;
242 ida_free(&binderfs_minors, device->miscdev.minor); 280 ida_free(&binderfs_minors, device->miscdev.minor);
243 mutex_unlock(&binderfs_minors_mutex); 281 mutex_unlock(&binderfs_minors_mutex);
244 282
@@ -246,43 +284,87 @@ static void binderfs_evict_inode(struct inode *inode)
246 kfree(device); 284 kfree(device);
247} 285}
248 286
287/**
288 * binderfs_parse_mount_opts - parse binderfs mount options
289 * @data: options to set (can be NULL in which case defaults are used)
290 */
291static int binderfs_parse_mount_opts(char *data,
292 struct binderfs_mount_opts *opts)
293{
294 char *p;
295 opts->max = BINDERFS_MAX_MINOR;
296
297 while ((p = strsep(&data, ",")) != NULL) {
298 substring_t args[MAX_OPT_ARGS];
299 int token;
300 int max_devices;
301
302 if (!*p)
303 continue;
304
305 token = match_token(p, tokens, args);
306 switch (token) {
307 case Opt_max:
308 if (match_int(&args[0], &max_devices) ||
309 (max_devices < 0 ||
310 (max_devices > BINDERFS_MAX_MINOR)))
311 return -EINVAL;
312
313 opts->max = max_devices;
314 break;
315 default:
316 pr_err("Invalid mount options\n");
317 return -EINVAL;
318 }
319 }
320
321 return 0;
322}
323
324static int binderfs_remount(struct super_block *sb, int *flags, char *data)
325{
326 struct binderfs_info *info = sb->s_fs_info;
327 return binderfs_parse_mount_opts(data, &info->mount_opts);
328}
329
330static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
331{
332 struct binderfs_info *info;
333
334 info = root->d_sb->s_fs_info;
335 if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
336 seq_printf(seq, ",max=%d", info->mount_opts.max);
337
338 return 0;
339}
340
249static const struct super_operations binderfs_super_ops = { 341static const struct super_operations binderfs_super_ops = {
250 .statfs = simple_statfs, 342 .evict_inode = binderfs_evict_inode,
251 .evict_inode = binderfs_evict_inode, 343 .remount_fs = binderfs_remount,
344 .show_options = binderfs_show_mount_opts,
345 .statfs = simple_statfs,
252}; 346};
253 347
348static inline bool is_binderfs_control_device(const struct dentry *dentry)
349{
350 struct binderfs_info *info = dentry->d_sb->s_fs_info;
351 return info->control_dentry == dentry;
352}
353
254static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, 354static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
255 struct inode *new_dir, struct dentry *new_dentry, 355 struct inode *new_dir, struct dentry *new_dentry,
256 unsigned int flags) 356 unsigned int flags)
257{ 357{
258 struct inode *inode = d_inode(old_dentry); 358 if (is_binderfs_control_device(old_dentry) ||
259 359 is_binderfs_control_device(new_dentry))
260 /* binderfs doesn't support directories. */
261 if (d_is_dir(old_dentry))
262 return -EPERM; 360 return -EPERM;
263 361
264 if (flags & ~RENAME_NOREPLACE) 362 return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
265 return -EINVAL;
266
267 if (!simple_empty(new_dentry))
268 return -ENOTEMPTY;
269
270 if (d_really_is_positive(new_dentry))
271 simple_unlink(new_dir, new_dentry);
272
273 old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
274 new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
275
276 return 0;
277} 363}
278 364
279static int binderfs_unlink(struct inode *dir, struct dentry *dentry) 365static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
280{ 366{
281 /* 367 if (is_binderfs_control_device(dentry))
282 * The control dentry is only ever touched during mount so checking it
283 * here should not require us to take lock.
284 */
285 if (BINDERFS_I(dir)->control_dentry == dentry)
286 return -EPERM; 368 return -EPERM;
287 369
288 return simple_unlink(dir, dentry); 370 return simple_unlink(dir, dentry);
@@ -313,13 +395,16 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
313 struct inode *inode = NULL; 395 struct inode *inode = NULL;
314 struct dentry *root = sb->s_root; 396 struct dentry *root = sb->s_root;
315 struct binderfs_info *info = sb->s_fs_info; 397 struct binderfs_info *info = sb->s_fs_info;
398#if defined(CONFIG_IPC_NS)
399 bool use_reserve = (info->ipc_ns == &init_ipc_ns);
400#else
401 bool use_reserve = true;
402#endif
316 403
317 device = kzalloc(sizeof(*device), GFP_KERNEL); 404 device = kzalloc(sizeof(*device), GFP_KERNEL);
318 if (!device) 405 if (!device)
319 return -ENOMEM; 406 return -ENOMEM;
320 407
321 inode_lock(d_inode(root));
322
323 /* If we have already created a binder-control node, return. */ 408 /* If we have already created a binder-control node, return. */
324 if (info->control_dentry) { 409 if (info->control_dentry) {
325 ret = 0; 410 ret = 0;
@@ -333,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
333 418
334 /* Reserve a new minor number for the new device. */ 419 /* Reserve a new minor number for the new device. */
335 mutex_lock(&binderfs_minors_mutex); 420 mutex_lock(&binderfs_minors_mutex);
336 minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); 421 minor = ida_alloc_max(&binderfs_minors,
422 use_reserve ? BINDERFS_MAX_MINOR :
423 BINDERFS_MAX_MINOR_CAPPED,
424 GFP_KERNEL);
337 mutex_unlock(&binderfs_minors_mutex); 425 mutex_unlock(&binderfs_minors_mutex);
338 if (minor < 0) { 426 if (minor < 0) {
339 ret = minor; 427 ret = minor;
@@ -358,12 +446,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
358 inode->i_private = device; 446 inode->i_private = device;
359 info->control_dentry = dentry; 447 info->control_dentry = dentry;
360 d_add(dentry, inode); 448 d_add(dentry, inode);
361 inode_unlock(d_inode(root));
362 449
363 return 0; 450 return 0;
364 451
365out: 452out:
366 inode_unlock(d_inode(root));
367 kfree(device); 453 kfree(device);
368 iput(inode); 454 iput(inode);
369 455
@@ -378,12 +464,9 @@ static const struct inode_operations binderfs_dir_inode_operations = {
378 464
379static int binderfs_fill_super(struct super_block *sb, void *data, int silent) 465static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
380{ 466{
467 int ret;
381 struct binderfs_info *info; 468 struct binderfs_info *info;
382 int ret = -ENOMEM;
383 struct inode *inode = NULL; 469 struct inode *inode = NULL;
384 struct ipc_namespace *ipc_ns = sb->s_fs_info;
385
386 get_ipc_ns(ipc_ns);
387 470
388 sb->s_blocksize = PAGE_SIZE; 471 sb->s_blocksize = PAGE_SIZE;
389 sb->s_blocksize_bits = PAGE_SHIFT; 472 sb->s_blocksize_bits = PAGE_SHIFT;
@@ -405,11 +488,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
405 sb->s_op = &binderfs_super_ops; 488 sb->s_op = &binderfs_super_ops;
406 sb->s_time_gran = 1; 489 sb->s_time_gran = 1;
407 490
408 info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); 491 sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
409 if (!info) 492 if (!sb->s_fs_info)
410 goto err_without_dentry; 493 return -ENOMEM;
494 info = sb->s_fs_info;
495
496 info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
497
498 ret = binderfs_parse_mount_opts(data, &info->mount_opts);
499 if (ret)
500 return ret;
411 501
412 info->ipc_ns = ipc_ns;
413 info->root_gid = make_kgid(sb->s_user_ns, 0); 502 info->root_gid = make_kgid(sb->s_user_ns, 0);
414 if (!gid_valid(info->root_gid)) 503 if (!gid_valid(info->root_gid))
415 info->root_gid = GLOBAL_ROOT_GID; 504 info->root_gid = GLOBAL_ROOT_GID;
@@ -417,11 +506,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
417 if (!uid_valid(info->root_uid)) 506 if (!uid_valid(info->root_uid))
418 info->root_uid = GLOBAL_ROOT_UID; 507 info->root_uid = GLOBAL_ROOT_UID;
419 508
420 sb->s_fs_info = info;
421
422 inode = new_inode(sb); 509 inode = new_inode(sb);
423 if (!inode) 510 if (!inode)
424 goto err_without_dentry; 511 return -ENOMEM;
425 512
426 inode->i_ino = FIRST_INODE; 513 inode->i_ino = FIRST_INODE;
427 inode->i_fop = &simple_dir_operations; 514 inode->i_fop = &simple_dir_operations;
@@ -432,79 +519,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
432 519
433 sb->s_root = d_make_root(inode); 520 sb->s_root = d_make_root(inode);
434 if (!sb->s_root) 521 if (!sb->s_root)
435 goto err_without_dentry; 522 return -ENOMEM;
436
437 ret = binderfs_binder_ctl_create(sb);
438 if (ret)
439 goto err_with_dentry;
440
441 return 0;
442
443err_with_dentry:
444 dput(sb->s_root);
445 sb->s_root = NULL;
446
447err_without_dentry:
448 put_ipc_ns(ipc_ns);
449 iput(inode);
450 kfree(info);
451
452 return ret;
453}
454
455static int binderfs_test_super(struct super_block *sb, void *data)
456{
457 struct binderfs_info *info = sb->s_fs_info;
458
459 if (info)
460 return info->ipc_ns == data;
461
462 return 0;
463}
464 523
465static int binderfs_set_super(struct super_block *sb, void *data) 524 return binderfs_binder_ctl_create(sb);
466{
467 sb->s_fs_info = data;
468 return set_anon_super(sb, NULL);
469} 525}
470 526
471static struct dentry *binderfs_mount(struct file_system_type *fs_type, 527static struct dentry *binderfs_mount(struct file_system_type *fs_type,
472 int flags, const char *dev_name, 528 int flags, const char *dev_name,
473 void *data) 529 void *data)
474{ 530{
475 struct super_block *sb; 531 return mount_nodev(fs_type, flags, data, binderfs_fill_super);
476 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
477
478 if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN))
479 return ERR_PTR(-EPERM);
480
481 sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super,
482 flags, ipc_ns->user_ns, ipc_ns);
483 if (IS_ERR(sb))
484 return ERR_CAST(sb);
485
486 if (!sb->s_root) {
487 int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
488 if (ret) {
489 deactivate_locked_super(sb);
490 return ERR_PTR(ret);
491 }
492
493 sb->s_flags |= SB_ACTIVE;
494 }
495
496 return dget(sb->s_root);
497} 532}
498 533
499static void binderfs_kill_super(struct super_block *sb) 534static void binderfs_kill_super(struct super_block *sb)
500{ 535{
501 struct binderfs_info *info = sb->s_fs_info; 536 struct binderfs_info *info = sb->s_fs_info;
502 537
538 kill_litter_super(sb);
539
503 if (info && info->ipc_ns) 540 if (info && info->ipc_ns)
504 put_ipc_ns(info->ipc_ns); 541 put_ipc_ns(info->ipc_ns);
505 542
506 kfree(info); 543 kfree(info);
507 kill_litter_super(sb);
508} 544}
509 545
510static struct file_system_type binder_fs_type = { 546static struct file_system_type binder_fs_type = {
@@ -514,7 +550,7 @@ static struct file_system_type binder_fs_type = {
514 .fs_flags = FS_USERNS_MOUNT, 550 .fs_flags = FS_USERNS_MOUNT,
515}; 551};
516 552
517static int __init init_binderfs(void) 553int __init init_binderfs(void)
518{ 554{
519 int ret; 555 int ret;
520 556
@@ -530,15 +566,5 @@ static int __init init_binderfs(void)
530 return ret; 566 return ret;
531 } 567 }
532 568
533 binderfs_mnt = kern_mount(&binder_fs_type);
534 if (IS_ERR(binderfs_mnt)) {
535 ret = PTR_ERR(binderfs_mnt);
536 binderfs_mnt = NULL;
537 unregister_filesystem(&binder_fs_type);
538 unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
539 }
540
541 return ret; 569 return ret;
542} 570}
543
544device_initcall(init_binderfs);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4ca7a6b4eaae..8218db17ebdb 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers"
1091 1091
1092config PATA_ACPI 1092config PATA_ACPI
1093 tristate "ACPI firmware driver for PATA" 1093 tristate "ACPI firmware driver for PATA"
1094 depends on ATA_ACPI && ATA_BMDMA 1094 depends on ATA_ACPI && ATA_BMDMA && PCI
1095 help 1095 help
1096 This option enables an ACPI method driver which drives 1096 This option enables an ACPI method driver which drives
1097 motherboard PATA controller interfaces through the ACPI 1097 motherboard PATA controller interfaces through the ACPI
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index ef356e70e6de..8810475f307a 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -254,6 +254,8 @@ enum {
254 AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use 254 AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
255 SATA_MOBILE_LPM_POLICY 255 SATA_MOBILE_LPM_POLICY
256 as default lpm_policy */ 256 as default lpm_policy */
257 AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
258 suspend/resume */
257 259
258 /* ap->flags bits */ 260 /* ap->flags bits */
259 261
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f9cb51be38eb..d4bba3ace45d 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -28,6 +28,11 @@
28#define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4)) 28#define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4))
29#define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4)) 29#define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4))
30 30
31struct ahci_mvebu_plat_data {
32 int (*plat_config)(struct ahci_host_priv *hpriv);
33 unsigned int flags;
34};
35
31static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv, 36static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
32 const struct mbus_dram_target_info *dram) 37 const struct mbus_dram_target_info *dram)
33{ 38{
@@ -62,6 +67,35 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
62 writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); 67 writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
63} 68}
64 69
70static int ahci_mvebu_armada_380_config(struct ahci_host_priv *hpriv)
71{
72 const struct mbus_dram_target_info *dram;
73 int rc = 0;
74
75 dram = mv_mbus_dram_info();
76 if (dram)
77 ahci_mvebu_mbus_config(hpriv, dram);
78 else
79 rc = -ENODEV;
80
81 ahci_mvebu_regret_option(hpriv);
82
83 return rc;
84}
85
86static int ahci_mvebu_armada_3700_config(struct ahci_host_priv *hpriv)
87{
88 u32 reg;
89
90 writel(0, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
91
92 reg = readl(hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
93 reg |= BIT(6);
94 writel(reg, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
95
96 return 0;
97}
98
65/** 99/**
66 * ahci_mvebu_stop_engine 100 * ahci_mvebu_stop_engine
67 * 101 *
@@ -126,13 +160,9 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
126{ 160{
127 struct ata_host *host = platform_get_drvdata(pdev); 161 struct ata_host *host = platform_get_drvdata(pdev);
128 struct ahci_host_priv *hpriv = host->private_data; 162 struct ahci_host_priv *hpriv = host->private_data;
129 const struct mbus_dram_target_info *dram; 163 const struct ahci_mvebu_plat_data *pdata = hpriv->plat_data;
130 164
131 dram = mv_mbus_dram_info(); 165 pdata->plat_config(hpriv);
132 if (dram)
133 ahci_mvebu_mbus_config(hpriv, dram);
134
135 ahci_mvebu_regret_option(hpriv);
136 166
137 return ahci_platform_resume_host(&pdev->dev); 167 return ahci_platform_resume_host(&pdev->dev);
138} 168}
@@ -154,29 +184,30 @@ static struct scsi_host_template ahci_platform_sht = {
154 184
155static int ahci_mvebu_probe(struct platform_device *pdev) 185static int ahci_mvebu_probe(struct platform_device *pdev)
156{ 186{
187 const struct ahci_mvebu_plat_data *pdata;
157 struct ahci_host_priv *hpriv; 188 struct ahci_host_priv *hpriv;
158 const struct mbus_dram_target_info *dram;
159 int rc; 189 int rc;
160 190
191 pdata = of_device_get_match_data(&pdev->dev);
192 if (!pdata)
193 return -EINVAL;
194
161 hpriv = ahci_platform_get_resources(pdev, 0); 195 hpriv = ahci_platform_get_resources(pdev, 0);
162 if (IS_ERR(hpriv)) 196 if (IS_ERR(hpriv))
163 return PTR_ERR(hpriv); 197 return PTR_ERR(hpriv);
164 198
199 hpriv->flags |= pdata->flags;
200 hpriv->plat_data = (void *)pdata;
201
165 rc = ahci_platform_enable_resources(hpriv); 202 rc = ahci_platform_enable_resources(hpriv);
166 if (rc) 203 if (rc)
167 return rc; 204 return rc;
168 205
169 hpriv->stop_engine = ahci_mvebu_stop_engine; 206 hpriv->stop_engine = ahci_mvebu_stop_engine;
170 207
171 if (of_device_is_compatible(pdev->dev.of_node, 208 rc = pdata->plat_config(hpriv);
172 "marvell,armada-380-ahci")) { 209 if (rc)
173 dram = mv_mbus_dram_info(); 210 goto disable_resources;
174 if (!dram)
175 return -ENODEV;
176
177 ahci_mvebu_mbus_config(hpriv, dram);
178 ahci_mvebu_regret_option(hpriv);
179 }
180 211
181 rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info, 212 rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
182 &ahci_platform_sht); 213 &ahci_platform_sht);
@@ -190,18 +221,28 @@ disable_resources:
190 return rc; 221 return rc;
191} 222}
192 223
224static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = {
225 .plat_config = ahci_mvebu_armada_380_config,
226};
227
228static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = {
229 .plat_config = ahci_mvebu_armada_3700_config,
230 .flags = AHCI_HFLAG_SUSPEND_PHYS,
231};
232
193static const struct of_device_id ahci_mvebu_of_match[] = { 233static const struct of_device_id ahci_mvebu_of_match[] = {
194 { .compatible = "marvell,armada-380-ahci", }, 234 {
195 { .compatible = "marvell,armada-3700-ahci", }, 235 .compatible = "marvell,armada-380-ahci",
236 .data = &ahci_mvebu_armada_380_plat_data,
237 },
238 {
239 .compatible = "marvell,armada-3700-ahci",
240 .data = &ahci_mvebu_armada_3700_plat_data,
241 },
196 { }, 242 { },
197}; 243};
198MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match); 244MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
199 245
200/*
201 * We currently don't provide power management related operations,
202 * since there is no suspend/resume support at the platform level for
203 * Armada 38x for the moment.
204 */
205static struct platform_driver ahci_mvebu_driver = { 246static struct platform_driver ahci_mvebu_driver = {
206 .probe = ahci_mvebu_probe, 247 .probe = ahci_mvebu_probe,
207 .remove = ata_platform_remove_one, 248 .remove = ata_platform_remove_one,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 4b900fc659f7..81b1a3332ed6 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -56,6 +56,12 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
56 if (rc) 56 if (rc)
57 goto disable_phys; 57 goto disable_phys;
58 58
59 rc = phy_set_mode(hpriv->phys[i], PHY_MODE_SATA);
60 if (rc) {
61 phy_exit(hpriv->phys[i]);
62 goto disable_phys;
63 }
64
59 rc = phy_power_on(hpriv->phys[i]); 65 rc = phy_power_on(hpriv->phys[i]);
60 if (rc) { 66 if (rc) {
61 phy_exit(hpriv->phys[i]); 67 phy_exit(hpriv->phys[i]);
@@ -738,6 +744,9 @@ int ahci_platform_suspend_host(struct device *dev)
738 writel(ctl, mmio + HOST_CTL); 744 writel(ctl, mmio + HOST_CTL);
739 readl(mmio + HOST_CTL); /* flush */ 745 readl(mmio + HOST_CTL); /* flush */
740 746
747 if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
748 ahci_platform_disable_phys(hpriv);
749
741 return ata_host_suspend(host, PMSG_SUSPEND); 750 return ata_host_suspend(host, PMSG_SUSPEND);
742} 751}
743EXPORT_SYMBOL_GPL(ahci_platform_suspend_host); 752EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
@@ -756,6 +765,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
756int ahci_platform_resume_host(struct device *dev) 765int ahci_platform_resume_host(struct device *dev)
757{ 766{
758 struct ata_host *host = dev_get_drvdata(dev); 767 struct ata_host *host = dev_get_drvdata(dev);
768 struct ahci_host_priv *hpriv = host->private_data;
759 int rc; 769 int rc;
760 770
761 if (dev->power.power_state.event == PM_EVENT_SUSPEND) { 771 if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
@@ -766,6 +776,9 @@ int ahci_platform_resume_host(struct device *dev)
766 ahci_init_controller(host); 776 ahci_init_controller(host);
767 } 777 }
768 778
779 if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
780 ahci_platform_enable_phys(hpriv);
781
769 ata_host_resume(host); 782 ata_host_resume(host);
770 783
771 return 0; 784 return 0;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b8c3f9e6af89..adf28788cab5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4554 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, 4554 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
4555 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, 4555 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
4556 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, 4556 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
4557 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
4557 4558
4558 /* devices that don't properly handle queued TRIM commands */ 4559 /* devices that don't properly handle queued TRIM commands */
4559 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4560 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 8cc9c429ad95..9e7fc302430f 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -915,6 +915,10 @@ static struct scsi_host_template pata_macio_sht = {
915 .sg_tablesize = MAX_DCMDS, 915 .sg_tablesize = MAX_DCMDS,
916 /* We may not need that strict one */ 916 /* We may not need that strict one */
917 .dma_boundary = ATA_DMA_BOUNDARY, 917 .dma_boundary = ATA_DMA_BOUNDARY,
918 /* Not sure what the real max is but we know it's less than 64K, let's
919 * use 64K minus 256
920 */
921 .max_segment_size = MAX_DBDMA_SEG,
918 .slave_configure = pata_macio_slave_config, 922 .slave_configure = pata_macio_slave_config,
919}; 923};
920 924
@@ -1044,11 +1048,6 @@ static int pata_macio_common_init(struct pata_macio_priv *priv,
1044 /* Make sure we have sane initial timings in the cache */ 1048 /* Make sure we have sane initial timings in the cache */
1045 pata_macio_default_timings(priv); 1049 pata_macio_default_timings(priv);
1046 1050
1047 /* Not sure what the real max is but we know it's less than 64K, let's
1048 * use 64K minus 256
1049 */
1050 dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
1051
1052 /* Allocate libata host for 1 port */ 1051 /* Allocate libata host for 1 port */
1053 memset(&pinfo, 0, sizeof(struct ata_port_info)); 1052 memset(&pinfo, 0, sizeof(struct ata_port_info));
1054 pmac_macio_calc_timing_masks(priv, &pinfo); 1053 pmac_macio_calc_timing_masks(priv, &pinfo);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 4dc528bf8e85..9c1247d42897 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -729,8 +729,8 @@ static int sata_fsl_port_start(struct ata_port *ap)
729 if (!pp) 729 if (!pp)
730 return -ENOMEM; 730 return -ENOMEM;
731 731
732 mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, 732 mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
733 GFP_KERNEL); 733 GFP_KERNEL);
734 if (!mem) { 734 if (!mem) {
735 kfree(pp); 735 kfree(pp);
736 return -ENOMEM; 736 return -ENOMEM;
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e0bcf9b2dab0..174e84ce4379 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -245,8 +245,15 @@ struct inic_port_priv {
245 245
246static struct scsi_host_template inic_sht = { 246static struct scsi_host_template inic_sht = {
247 ATA_BASE_SHT(DRV_NAME), 247 ATA_BASE_SHT(DRV_NAME),
248 .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */ 248 .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */
249 .dma_boundary = INIC_DMA_BOUNDARY, 249
250 /*
251 * This controller is braindamaged. dma_boundary is 0xffff like others
252 * but it will lock up the whole machine HARD if 65536 byte PRD entry
253 * is fed. Reduce maximum segment size.
254 */
255 .dma_boundary = INIC_DMA_BOUNDARY,
256 .max_segment_size = 65536 - 512,
250}; 257};
251 258
252static const int scr_map[] = { 259static const int scr_map[] = {
@@ -868,17 +875,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
868 return rc; 875 return rc;
869 } 876 }
870 877
871 /*
872 * This controller is braindamaged. dma_boundary is 0xffff
873 * like others but it will lock up the whole machine HARD if
874 * 65536 byte PRD entry is fed. Reduce maximum segment size.
875 */
876 rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
877 if (rc) {
878 dev_err(&pdev->dev, "failed to set the maximum segment size\n");
879 return rc;
880 }
881
882 rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); 878 rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
883 if (rc) { 879 if (rc) {
884 dev_err(&pdev->dev, "failed to initialize controller\n"); 880 dev_err(&pdev->dev, "failed to initialize controller\n");
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 29f102dcfec4..211607986134 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
533 533
534static int he_init_tpdrq(struct he_dev *he_dev) 534static int he_init_tpdrq(struct he_dev *he_dev)
535{ 535{
536 he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 536 he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
537 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), 537 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
538 &he_dev->tpdrq_phys, GFP_KERNEL); 538 &he_dev->tpdrq_phys,
539 GFP_KERNEL);
539 if (he_dev->tpdrq_base == NULL) { 540 if (he_dev->tpdrq_base == NULL) {
540 hprintk("failed to alloc tpdrq\n"); 541 hprintk("failed to alloc tpdrq\n");
541 return -ENOMEM; 542 return -ENOMEM;
@@ -717,7 +718,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
717 instead of '/ 512', use '>> 9' to prevent a call 718 instead of '/ 512', use '>> 9' to prevent a call
718 to divdu3 on x86 platforms 719 to divdu3 on x86 platforms
719 */ 720 */
720 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; 721 rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
721 722
722 if (rate_cps < 10) 723 if (rate_cps < 10)
723 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ 724 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
@@ -805,9 +806,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
805 goto out_free_rbpl_virt; 806 goto out_free_rbpl_virt;
806 } 807 }
807 808
808 he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 809 he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
809 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), 810 CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
810 &he_dev->rbpl_phys, GFP_KERNEL); 811 &he_dev->rbpl_phys, GFP_KERNEL);
811 if (he_dev->rbpl_base == NULL) { 812 if (he_dev->rbpl_base == NULL) {
812 hprintk("failed to alloc rbpl_base\n"); 813 hprintk("failed to alloc rbpl_base\n");
813 goto out_destroy_rbpl_pool; 814 goto out_destroy_rbpl_pool;
@@ -844,9 +845,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
844 845
845 /* rx buffer ready queue */ 846 /* rx buffer ready queue */
846 847
847 he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 848 he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
848 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), 849 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
849 &he_dev->rbrq_phys, GFP_KERNEL); 850 &he_dev->rbrq_phys, GFP_KERNEL);
850 if (he_dev->rbrq_base == NULL) { 851 if (he_dev->rbrq_base == NULL) {
851 hprintk("failed to allocate rbrq\n"); 852 hprintk("failed to allocate rbrq\n");
852 goto out_free_rbpl; 853 goto out_free_rbpl;
@@ -868,9 +869,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
868 869
869 /* tx buffer ready queue */ 870 /* tx buffer ready queue */
870 871
871 he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 872 he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
872 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), 873 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
873 &he_dev->tbrq_phys, GFP_KERNEL); 874 &he_dev->tbrq_phys, GFP_KERNEL);
874 if (he_dev->tbrq_base == NULL) { 875 if (he_dev->tbrq_base == NULL) {
875 hprintk("failed to allocate tbrq\n"); 876 hprintk("failed to allocate tbrq\n");
876 goto out_free_rbpq_base; 877 goto out_free_rbpq_base;
@@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev)
913 /* 2.9.3.5 tail offset for each interrupt queue is located after the 914 /* 2.9.3.5 tail offset for each interrupt queue is located after the
914 end of the interrupt queue */ 915 end of the interrupt queue */
915 916
916 he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 917 he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
917 (CONFIG_IRQ_SIZE + 1) 918 (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
918 * sizeof(struct he_irq), 919 &he_dev->irq_phys, GFP_KERNEL);
919 &he_dev->irq_phys,
920 GFP_KERNEL);
921 if (he_dev->irq_base == NULL) { 920 if (he_dev->irq_base == NULL) {
922 hprintk("failed to allocate irq\n"); 921 hprintk("failed to allocate irq\n");
923 return -ENOMEM; 922 return -ENOMEM;
@@ -1464,9 +1463,9 @@ static int he_start(struct atm_dev *dev)
1464 1463
1465 /* host status page */ 1464 /* host status page */
1466 1465
1467 he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev, 1466 he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
1468 sizeof(struct he_hsp), 1467 sizeof(struct he_hsp),
1469 &he_dev->hsp_phys, GFP_KERNEL); 1468 &he_dev->hsp_phys, GFP_KERNEL);
1470 if (he_dev->hsp == NULL) { 1469 if (he_dev->hsp == NULL) {
1471 hprintk("failed to allocate host status page\n"); 1470 hprintk("failed to allocate host status page\n");
1472 return -ENOMEM; 1471 return -ENOMEM;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 6e737142ceaa..43a14579e80e 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -641,8 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
641 scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL); 641 scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
642 if (!scq) 642 if (!scq)
643 return NULL; 643 return NULL;
644 scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE, 644 scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,
645 &scq->paddr, GFP_KERNEL); 645 &scq->paddr, GFP_KERNEL);
646 if (scq->base == NULL) { 646 if (scq->base == NULL) {
647 kfree(scq); 647 kfree(scq);
648 return NULL; 648 return NULL;
@@ -971,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
971{ 971{
972 struct rsq_entry *rsqe; 972 struct rsq_entry *rsqe;
973 973
974 card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE, 974 card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
975 &card->rsq.paddr, GFP_KERNEL); 975 &card->rsq.paddr, GFP_KERNEL);
976 if (card->rsq.base == NULL) { 976 if (card->rsq.base == NULL) {
977 printk("%s: can't allocate RSQ.\n", card->name); 977 printk("%s: can't allocate RSQ.\n", card->name);
978 return -1; 978 return -1;
@@ -3390,10 +3390,10 @@ static int init_card(struct atm_dev *dev)
3390 writel(0, SAR_REG_GP); 3390 writel(0, SAR_REG_GP);
3391 3391
3392 /* Initialize RAW Cell Handle Register */ 3392 /* Initialize RAW Cell Handle Register */
3393 card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev, 3393 card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev,
3394 2 * sizeof(u32), 3394 2 * sizeof(u32),
3395 &card->raw_cell_paddr, 3395 &card->raw_cell_paddr,
3396 GFP_KERNEL); 3396 GFP_KERNEL);
3397 if (!card->raw_cell_hnd) { 3397 if (!card->raw_cell_hnd) {
3398 printk("%s: memory allocation failure.\n", card->name); 3398 printk("%s: memory allocation failure.\n", card->name);
3399 deinit_card(card); 3399 deinit_card(card);
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a43276c76fc6..21393ec3b9a4 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client)
509 struct ht16k33_priv *priv = i2c_get_clientdata(client); 509 struct ht16k33_priv *priv = i2c_get_clientdata(client);
510 struct ht16k33_fbdev *fbdev = &priv->fbdev; 510 struct ht16k33_fbdev *fbdev = &priv->fbdev;
511 511
512 cancel_delayed_work(&fbdev->work); 512 cancel_delayed_work_sync(&fbdev->work);
513 unregister_framebuffer(fbdev->info); 513 unregister_framebuffer(fbdev->info);
514 framebuffer_release(fbdev->info); 514 framebuffer_release(fbdev->info);
515 free_page((unsigned long) fbdev->buffer); 515 free_page((unsigned long) fbdev->buffer);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cf78fa6d470d..a7359535caf5 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
79 ct_idx = get_cacheinfo_idx(this_leaf->type); 79 ct_idx = get_cacheinfo_idx(this_leaf->type);
80 propname = cache_type_info[ct_idx].size_prop; 80 propname = cache_type_info[ct_idx].size_prop;
81 81
82 if (of_property_read_u32(np, propname, &this_leaf->size)) 82 of_property_read_u32(np, propname, &this_leaf->size);
83 this_leaf->size = 0;
84} 83}
85 84
86/* not cache_line_size() because that's a macro in include/linux/cache.h */ 85/* not cache_line_size() because that's a macro in include/linux/cache.h */
@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
114 ct_idx = get_cacheinfo_idx(this_leaf->type); 113 ct_idx = get_cacheinfo_idx(this_leaf->type);
115 propname = cache_type_info[ct_idx].nr_sets_prop; 114 propname = cache_type_info[ct_idx].nr_sets_prop;
116 115
117 if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) 116 of_property_read_u32(np, propname, &this_leaf->number_of_sets);
118 this_leaf->number_of_sets = 0;
119} 117}
120 118
121static void cache_associativity(struct cacheinfo *this_leaf) 119static void cache_associativity(struct cacheinfo *this_leaf)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a690fd400260..0992e67e862b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,7 @@
32#include <trace/events/power.h> 32#include <trace/events/power.h>
33#include <linux/cpufreq.h> 33#include <linux/cpufreq.h>
34#include <linux/cpuidle.h> 34#include <linux/cpuidle.h>
35#include <linux/devfreq.h>
35#include <linux/timer.h> 36#include <linux/timer.h>
36 37
37#include "../base.h" 38#include "../base.h"
@@ -1078,6 +1079,7 @@ void dpm_resume(pm_message_t state)
1078 dpm_show_time(starttime, state, 0, NULL); 1079 dpm_show_time(starttime, state, 0, NULL);
1079 1080
1080 cpufreq_resume(); 1081 cpufreq_resume();
1082 devfreq_resume();
1081 trace_suspend_resume(TPS("dpm_resume"), state.event, false); 1083 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1082} 1084}
1083 1085
@@ -1852,6 +1854,7 @@ int dpm_suspend(pm_message_t state)
1852 trace_suspend_resume(TPS("dpm_suspend"), state.event, true); 1854 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1853 might_sleep(); 1855 might_sleep();
1854 1856
1857 devfreq_suspend();
1855 cpufreq_suspend(); 1858 cpufreq_suspend();
1856 1859
1857 mutex_lock(&dpm_list_mtx); 1860 mutex_lock(&dpm_list_mtx);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 70624695b6d5..ccd296dbb95c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -95,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
95static void pm_runtime_deactivate_timer(struct device *dev) 95static void pm_runtime_deactivate_timer(struct device *dev)
96{ 96{
97 if (dev->power.timer_expires > 0) { 97 if (dev->power.timer_expires > 0) {
98 hrtimer_cancel(&dev->power.suspend_timer); 98 hrtimer_try_to_cancel(&dev->power.suspend_timer);
99 dev->power.timer_expires = 0; 99 dev->power.timer_expires = 0;
100 } 100 }
101} 101}
@@ -121,7 +121,7 @@ static void pm_runtime_cancel_pending(struct device *dev)
121 * Compute the autosuspend-delay expiration time based on the device's 121 * Compute the autosuspend-delay expiration time based on the device's
122 * power.last_busy time. If the delay has already expired or is disabled 122 * power.last_busy time. If the delay has already expired or is disabled
123 * (negative) or the power.use_autosuspend flag isn't set, return 0. 123 * (negative) or the power.use_autosuspend flag isn't set, return 0.
124 * Otherwise return the expiration time in jiffies (adjusted to be nonzero). 124 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
125 * 125 *
126 * This function may be called either with or without dev->power.lock held. 126 * This function may be called either with or without dev->power.lock held.
127 * Either way it can be racy, since power.last_busy may be updated at any time. 127 * Either way it can be racy, since power.last_busy may be updated at any time.
@@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
130{ 130{
131 int autosuspend_delay; 131 int autosuspend_delay;
132 u64 last_busy, expires = 0; 132 u64 last_busy, expires = 0;
133 u64 now = ktime_to_ns(ktime_get()); 133 u64 now = ktime_get_mono_fast_ns();
134 134
135 if (!dev->power.use_autosuspend) 135 if (!dev->power.use_autosuspend)
136 goto out; 136 goto out;
@@ -141,7 +141,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
141 141
142 last_busy = READ_ONCE(dev->power.last_busy); 142 last_busy = READ_ONCE(dev->power.last_busy);
143 143
144 expires = last_busy + autosuspend_delay * NSEC_PER_MSEC; 144 expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
145 if (expires <= now) 145 if (expires <= now)
146 expires = 0; /* Already expired. */ 146 expires = 0; /* Already expired. */
147 147
@@ -525,7 +525,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
525 * We add a slack of 25% to gather wakeups 525 * We add a slack of 25% to gather wakeups
526 * without sacrificing the granularity. 526 * without sacrificing the granularity.
527 */ 527 */
528 u64 slack = READ_ONCE(dev->power.autosuspend_delay) * 528 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
529 (NSEC_PER_MSEC >> 2); 529 (NSEC_PER_MSEC >> 2);
530 530
531 dev->power.timer_expires = expires; 531 dev->power.timer_expires = expires;
@@ -905,8 +905,11 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
905 spin_lock_irqsave(&dev->power.lock, flags); 905 spin_lock_irqsave(&dev->power.lock, flags);
906 906
907 expires = dev->power.timer_expires; 907 expires = dev->power.timer_expires;
908 /* If 'expire' is after 'jiffies' we've been called too early. */ 908 /*
909 if (expires > 0 && expires < ktime_to_ns(ktime_get())) { 909 * If 'expires' is after the current time, we've been called
910 * too early.
911 */
912 if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
910 dev->power.timer_expires = 0; 913 dev->power.timer_expires = 0;
911 rpm_suspend(dev, dev->power.timer_autosuspends ? 914 rpm_suspend(dev, dev->power.timer_autosuspends ?
912 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); 915 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -925,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
925int pm_schedule_suspend(struct device *dev, unsigned int delay) 928int pm_schedule_suspend(struct device *dev, unsigned int delay)
926{ 929{
927 unsigned long flags; 930 unsigned long flags;
928 ktime_t expires; 931 u64 expires;
929 int retval; 932 int retval;
930 933
931 spin_lock_irqsave(&dev->power.lock, flags); 934 spin_lock_irqsave(&dev->power.lock, flags);
@@ -942,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
942 /* Other scheduled or pending requests need to be canceled. */ 945 /* Other scheduled or pending requests need to be canceled. */
943 pm_runtime_cancel_pending(dev); 946 pm_runtime_cancel_pending(dev);
944 947
945 expires = ktime_add(ktime_get(), ms_to_ktime(delay)); 948 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
946 dev->power.timer_expires = ktime_to_ns(expires); 949 dev->power.timer_expires = expires;
947 dev->power.timer_autosuspends = 0; 950 dev->power.timer_autosuspends = 0;
948 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); 951 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
949 952
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1bd1145ad8b5..330c1f7e9665 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
108 * suppress pointless writes. 108 * suppress pointless writes.
109 */ 109 */
110 for (i = 0; i < d->chip->num_regs; i++) { 110 for (i = 0; i < d->chip->num_regs; i++) {
111 if (!d->chip->mask_base)
112 continue;
113
111 reg = d->chip->mask_base + 114 reg = d->chip->mask_base +
112 (i * map->reg_stride * d->irq_reg_stride); 115 (i * map->reg_stride * d->irq_reg_stride);
113 if (d->chip->mask_invert) { 116 if (d->chip->mask_invert) {
@@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
258 const struct regmap_irq_type *t = &irq_data->type; 261 const struct regmap_irq_type *t = &irq_data->type;
259 262
260 if ((t->types_supported & type) != type) 263 if ((t->types_supported & type) != type)
261 return -ENOTSUPP; 264 return 0;
262 265
263 reg = t->type_reg_offset / map->reg_stride; 266 reg = t->type_reg_offset / map->reg_stride;
264 267
@@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
588 /* Mask all the interrupts by default */ 591 /* Mask all the interrupts by default */
589 for (i = 0; i < chip->num_regs; i++) { 592 for (i = 0; i < chip->num_regs; i++) {
590 d->mask_buf[i] = d->mask_buf_def[i]; 593 d->mask_buf[i] = d->mask_buf_def[i];
594 if (!chip->mask_base)
595 continue;
596
591 reg = chip->mask_base + 597 reg = chip->mask_base +
592 (i * map->reg_stride * d->irq_reg_stride); 598 (i * map->reg_stride * d->irq_reg_stride);
593 if (chip->mask_invert) 599 if (chip->mask_invert)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 6f2856c6d0f2..55481b40df9a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
4075 4075
4076 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { 4076 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
4077 if (lock_fdc(drive)) 4077 if (lock_fdc(drive))
4078 return -EINTR; 4078 return 0;
4079 poll_drive(false, 0); 4079 poll_drive(false, 0);
4080 process_fd_request(); 4080 process_fd_request();
4081 } 4081 }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b8a0720d3653..cf5538942834 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1190,6 +1190,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1190 goto out_unlock; 1190 goto out_unlock;
1191 } 1191 }
1192 1192
1193 if (lo->lo_offset != info->lo_offset ||
1194 lo->lo_sizelimit != info->lo_sizelimit) {
1195 sync_blockdev(lo->lo_device);
1196 kill_bdev(lo->lo_device);
1197 }
1198
1193 /* I/O need to be drained during transfer transition */ 1199 /* I/O need to be drained during transfer transition */
1194 blk_mq_freeze_queue(lo->lo_queue); 1200 blk_mq_freeze_queue(lo->lo_queue);
1195 1201
@@ -1218,6 +1224,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1218 1224
1219 if (lo->lo_offset != info->lo_offset || 1225 if (lo->lo_offset != info->lo_offset ||
1220 lo->lo_sizelimit != info->lo_sizelimit) { 1226 lo->lo_sizelimit != info->lo_sizelimit) {
1227 /* kill_bdev should have truncated all the pages */
1228 if (lo->lo_device->bd_inode->i_mapping->nrpages) {
1229 err = -EAGAIN;
1230 pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1231 __func__, lo->lo_number, lo->lo_file_name,
1232 lo->lo_device->bd_inode->i_mapping->nrpages);
1233 goto out_unfreeze;
1234 }
1221 if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { 1235 if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
1222 err = -EFBIG; 1236 err = -EFBIG;
1223 goto out_unfreeze; 1237 goto out_unfreeze;
@@ -1443,22 +1457,39 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1443 1457
1444static int loop_set_block_size(struct loop_device *lo, unsigned long arg) 1458static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1445{ 1459{
1460 int err = 0;
1461
1446 if (lo->lo_state != Lo_bound) 1462 if (lo->lo_state != Lo_bound)
1447 return -ENXIO; 1463 return -ENXIO;
1448 1464
1449 if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) 1465 if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
1450 return -EINVAL; 1466 return -EINVAL;
1451 1467
1468 if (lo->lo_queue->limits.logical_block_size != arg) {
1469 sync_blockdev(lo->lo_device);
1470 kill_bdev(lo->lo_device);
1471 }
1472
1452 blk_mq_freeze_queue(lo->lo_queue); 1473 blk_mq_freeze_queue(lo->lo_queue);
1453 1474
1475 /* kill_bdev should have truncated all the pages */
1476 if (lo->lo_queue->limits.logical_block_size != arg &&
1477 lo->lo_device->bd_inode->i_mapping->nrpages) {
1478 err = -EAGAIN;
1479 pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1480 __func__, lo->lo_number, lo->lo_file_name,
1481 lo->lo_device->bd_inode->i_mapping->nrpages);
1482 goto out_unfreeze;
1483 }
1484
1454 blk_queue_logical_block_size(lo->lo_queue, arg); 1485 blk_queue_logical_block_size(lo->lo_queue, arg);
1455 blk_queue_physical_block_size(lo->lo_queue, arg); 1486 blk_queue_physical_block_size(lo->lo_queue, arg);
1456 blk_queue_io_min(lo->lo_queue, arg); 1487 blk_queue_io_min(lo->lo_queue, arg);
1457 loop_update_dio(lo); 1488 loop_update_dio(lo);
1458 1489out_unfreeze:
1459 blk_mq_unfreeze_queue(lo->lo_queue); 1490 blk_mq_unfreeze_queue(lo->lo_queue);
1460 1491
1461 return 0; 1492 return err;
1462} 1493}
1463 1494
1464static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, 1495static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08696f5f00bb..7c9a949e876b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
288 blk_queue_physical_block_size(nbd->disk->queue, config->blksize); 288 blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
289 set_capacity(nbd->disk, config->bytesize >> 9); 289 set_capacity(nbd->disk, config->bytesize >> 9);
290 if (bdev) { 290 if (bdev) {
291 if (bdev->bd_disk) 291 if (bdev->bd_disk) {
292 bd_set_size(bdev, config->bytesize); 292 bd_set_size(bdev, config->bytesize);
293 else 293 set_blocksize(bdev, config->blksize);
294 } else
294 bdev->bd_invalidated = 1; 295 bdev->bd_invalidated = 1;
295 bdput(bdev); 296 bdput(bdev);
296 } 297 }
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index b3df2793e7cd..34b22d6523ba 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -97,6 +97,7 @@ void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
97#else 97#else
98static inline int null_zone_init(struct nullb_device *dev) 98static inline int null_zone_init(struct nullb_device *dev)
99{ 99{
100 pr_err("null_blk: CONFIG_BLK_DEV_ZONED not enabled\n");
100 return -EINVAL; 101 return -EINVAL;
101} 102}
102static inline void null_zone_exit(struct nullb_device *dev) {} 103static inline void null_zone_exit(struct nullb_device *dev) {}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e5140bbf241..1e92b61d0bd5 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5986,7 +5986,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
5986 struct list_head *tmp; 5986 struct list_head *tmp;
5987 int dev_id; 5987 int dev_id;
5988 char opt_buf[6]; 5988 char opt_buf[6];
5989 bool already = false;
5990 bool force = false; 5989 bool force = false;
5991 int ret; 5990 int ret;
5992 5991
@@ -6019,13 +6018,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
6019 spin_lock_irq(&rbd_dev->lock); 6018 spin_lock_irq(&rbd_dev->lock);
6020 if (rbd_dev->open_count && !force) 6019 if (rbd_dev->open_count && !force)
6021 ret = -EBUSY; 6020 ret = -EBUSY;
6022 else 6021 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6023 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING, 6022 &rbd_dev->flags))
6024 &rbd_dev->flags); 6023 ret = -EINPROGRESS;
6025 spin_unlock_irq(&rbd_dev->lock); 6024 spin_unlock_irq(&rbd_dev->lock);
6026 } 6025 }
6027 spin_unlock(&rbd_dev_list_lock); 6026 spin_unlock(&rbd_dev_list_lock);
6028 if (ret < 0 || already) 6027 if (ret)
6029 return ret; 6028 return ret;
6030 6029
6031 if (force) { 6030 if (force) {
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index a10d5736d8f7..ab893a7571a2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2641,8 +2641,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
2641 "comp pci_alloc, total bytes %zd entries %d\n", 2641 "comp pci_alloc, total bytes %zd entries %d\n",
2642 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); 2642 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
2643 2643
2644 skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, 2644 skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
2645 &skdev->cq_dma_address, GFP_KERNEL); 2645 &skdev->cq_dma_address, GFP_KERNEL);
2646 2646
2647 if (skcomp == NULL) { 2647 if (skcomp == NULL) {
2648 rc = -ENOMEM; 2648 rc = -ENOMEM;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 33c5cc879f24..04ca65912638 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -316,11 +316,9 @@ static ssize_t idle_store(struct device *dev,
316 * See the comment in writeback_store. 316 * See the comment in writeback_store.
317 */ 317 */
318 zram_slot_lock(zram, index); 318 zram_slot_lock(zram, index);
319 if (!zram_allocated(zram, index) || 319 if (zram_allocated(zram, index) &&
320 zram_test_flag(zram, index, ZRAM_UNDER_WB)) 320 !zram_test_flag(zram, index, ZRAM_UNDER_WB))
321 goto next; 321 zram_set_flag(zram, index, ZRAM_IDLE);
322 zram_set_flag(zram, index, ZRAM_IDLE);
323next:
324 zram_slot_unlock(zram, index); 322 zram_slot_unlock(zram, index);
325 } 323 }
326 324
@@ -330,6 +328,41 @@ next:
330} 328}
331 329
332#ifdef CONFIG_ZRAM_WRITEBACK 330#ifdef CONFIG_ZRAM_WRITEBACK
331static ssize_t writeback_limit_enable_store(struct device *dev,
332 struct device_attribute *attr, const char *buf, size_t len)
333{
334 struct zram *zram = dev_to_zram(dev);
335 u64 val;
336 ssize_t ret = -EINVAL;
337
338 if (kstrtoull(buf, 10, &val))
339 return ret;
340
341 down_read(&zram->init_lock);
342 spin_lock(&zram->wb_limit_lock);
343 zram->wb_limit_enable = val;
344 spin_unlock(&zram->wb_limit_lock);
345 up_read(&zram->init_lock);
346 ret = len;
347
348 return ret;
349}
350
351static ssize_t writeback_limit_enable_show(struct device *dev,
352 struct device_attribute *attr, char *buf)
353{
354 bool val;
355 struct zram *zram = dev_to_zram(dev);
356
357 down_read(&zram->init_lock);
358 spin_lock(&zram->wb_limit_lock);
359 val = zram->wb_limit_enable;
360 spin_unlock(&zram->wb_limit_lock);
361 up_read(&zram->init_lock);
362
363 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
364}
365
333static ssize_t writeback_limit_store(struct device *dev, 366static ssize_t writeback_limit_store(struct device *dev,
334 struct device_attribute *attr, const char *buf, size_t len) 367 struct device_attribute *attr, const char *buf, size_t len)
335{ 368{
@@ -341,9 +374,9 @@ static ssize_t writeback_limit_store(struct device *dev,
341 return ret; 374 return ret;
342 375
343 down_read(&zram->init_lock); 376 down_read(&zram->init_lock);
344 atomic64_set(&zram->stats.bd_wb_limit, val); 377 spin_lock(&zram->wb_limit_lock);
345 if (val == 0) 378 zram->bd_wb_limit = val;
346 zram->stop_writeback = false; 379 spin_unlock(&zram->wb_limit_lock);
347 up_read(&zram->init_lock); 380 up_read(&zram->init_lock);
348 ret = len; 381 ret = len;
349 382
@@ -357,7 +390,9 @@ static ssize_t writeback_limit_show(struct device *dev,
357 struct zram *zram = dev_to_zram(dev); 390 struct zram *zram = dev_to_zram(dev);
358 391
359 down_read(&zram->init_lock); 392 down_read(&zram->init_lock);
360 val = atomic64_read(&zram->stats.bd_wb_limit); 393 spin_lock(&zram->wb_limit_lock);
394 val = zram->bd_wb_limit;
395 spin_unlock(&zram->wb_limit_lock);
361 up_read(&zram->init_lock); 396 up_read(&zram->init_lock);
362 397
363 return scnprintf(buf, PAGE_SIZE, "%llu\n", val); 398 return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
@@ -588,8 +623,8 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
588 return 1; 623 return 1;
589} 624}
590 625
591#define HUGE_WRITEBACK 0x1 626#define HUGE_WRITEBACK 1
592#define IDLE_WRITEBACK 0x2 627#define IDLE_WRITEBACK 2
593 628
594static ssize_t writeback_store(struct device *dev, 629static ssize_t writeback_store(struct device *dev,
595 struct device_attribute *attr, const char *buf, size_t len) 630 struct device_attribute *attr, const char *buf, size_t len)
@@ -602,7 +637,7 @@ static ssize_t writeback_store(struct device *dev,
602 struct page *page; 637 struct page *page;
603 ssize_t ret, sz; 638 ssize_t ret, sz;
604 char mode_buf[8]; 639 char mode_buf[8];
605 unsigned long mode = -1UL; 640 int mode = -1;
606 unsigned long blk_idx = 0; 641 unsigned long blk_idx = 0;
607 642
608 sz = strscpy(mode_buf, buf, sizeof(mode_buf)); 643 sz = strscpy(mode_buf, buf, sizeof(mode_buf));
@@ -618,7 +653,7 @@ static ssize_t writeback_store(struct device *dev,
618 else if (!strcmp(mode_buf, "huge")) 653 else if (!strcmp(mode_buf, "huge"))
619 mode = HUGE_WRITEBACK; 654 mode = HUGE_WRITEBACK;
620 655
621 if (mode == -1UL) 656 if (mode == -1)
622 return -EINVAL; 657 return -EINVAL;
623 658
624 down_read(&zram->init_lock); 659 down_read(&zram->init_lock);
@@ -645,10 +680,13 @@ static ssize_t writeback_store(struct device *dev,
645 bvec.bv_len = PAGE_SIZE; 680 bvec.bv_len = PAGE_SIZE;
646 bvec.bv_offset = 0; 681 bvec.bv_offset = 0;
647 682
648 if (zram->stop_writeback) { 683 spin_lock(&zram->wb_limit_lock);
684 if (zram->wb_limit_enable && !zram->bd_wb_limit) {
685 spin_unlock(&zram->wb_limit_lock);
649 ret = -EIO; 686 ret = -EIO;
650 break; 687 break;
651 } 688 }
689 spin_unlock(&zram->wb_limit_lock);
652 690
653 if (!blk_idx) { 691 if (!blk_idx) {
654 blk_idx = alloc_block_bdev(zram); 692 blk_idx = alloc_block_bdev(zram);
@@ -667,10 +705,11 @@ static ssize_t writeback_store(struct device *dev,
667 zram_test_flag(zram, index, ZRAM_UNDER_WB)) 705 zram_test_flag(zram, index, ZRAM_UNDER_WB))
668 goto next; 706 goto next;
669 707
670 if ((mode & IDLE_WRITEBACK && 708 if (mode == IDLE_WRITEBACK &&
671 !zram_test_flag(zram, index, ZRAM_IDLE)) && 709 !zram_test_flag(zram, index, ZRAM_IDLE))
672 (mode & HUGE_WRITEBACK && 710 goto next;
673 !zram_test_flag(zram, index, ZRAM_HUGE))) 711 if (mode == HUGE_WRITEBACK &&
712 !zram_test_flag(zram, index, ZRAM_HUGE))
674 goto next; 713 goto next;
675 /* 714 /*
676 * Clearing ZRAM_UNDER_WB is duty of caller. 715 * Clearing ZRAM_UNDER_WB is duty of caller.
@@ -732,11 +771,10 @@ static ssize_t writeback_store(struct device *dev,
732 zram_set_element(zram, index, blk_idx); 771 zram_set_element(zram, index, blk_idx);
733 blk_idx = 0; 772 blk_idx = 0;
734 atomic64_inc(&zram->stats.pages_stored); 773 atomic64_inc(&zram->stats.pages_stored);
735 if (atomic64_add_unless(&zram->stats.bd_wb_limit, 774 spin_lock(&zram->wb_limit_lock);
736 -1 << (PAGE_SHIFT - 12), 0)) { 775 if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
737 if (atomic64_read(&zram->stats.bd_wb_limit) == 0) 776 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
738 zram->stop_writeback = true; 777 spin_unlock(&zram->wb_limit_lock);
739 }
740next: 778next:
741 zram_slot_unlock(zram, index); 779 zram_slot_unlock(zram, index);
742 } 780 }
@@ -1812,6 +1850,7 @@ static DEVICE_ATTR_RW(comp_algorithm);
1812static DEVICE_ATTR_RW(backing_dev); 1850static DEVICE_ATTR_RW(backing_dev);
1813static DEVICE_ATTR_WO(writeback); 1851static DEVICE_ATTR_WO(writeback);
1814static DEVICE_ATTR_RW(writeback_limit); 1852static DEVICE_ATTR_RW(writeback_limit);
1853static DEVICE_ATTR_RW(writeback_limit_enable);
1815#endif 1854#endif
1816 1855
1817static struct attribute *zram_disk_attrs[] = { 1856static struct attribute *zram_disk_attrs[] = {
@@ -1828,6 +1867,7 @@ static struct attribute *zram_disk_attrs[] = {
1828 &dev_attr_backing_dev.attr, 1867 &dev_attr_backing_dev.attr,
1829 &dev_attr_writeback.attr, 1868 &dev_attr_writeback.attr,
1830 &dev_attr_writeback_limit.attr, 1869 &dev_attr_writeback_limit.attr,
1870 &dev_attr_writeback_limit_enable.attr,
1831#endif 1871#endif
1832 &dev_attr_io_stat.attr, 1872 &dev_attr_io_stat.attr,
1833 &dev_attr_mm_stat.attr, 1873 &dev_attr_mm_stat.attr,
@@ -1867,7 +1907,9 @@ static int zram_add(void)
1867 device_id = ret; 1907 device_id = ret;
1868 1908
1869 init_rwsem(&zram->init_lock); 1909 init_rwsem(&zram->init_lock);
1870 1910#ifdef CONFIG_ZRAM_WRITEBACK
1911 spin_lock_init(&zram->wb_limit_lock);
1912#endif
1871 queue = blk_alloc_queue(GFP_KERNEL); 1913 queue = blk_alloc_queue(GFP_KERNEL);
1872 if (!queue) { 1914 if (!queue) {
1873 pr_err("Error allocating disk queue for device %d\n", 1915 pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 4bd3afd15e83..f2fd46daa760 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -86,7 +86,6 @@ struct zram_stats {
86 atomic64_t bd_count; /* no. of pages in backing device */ 86 atomic64_t bd_count; /* no. of pages in backing device */
87 atomic64_t bd_reads; /* no. of reads from backing device */ 87 atomic64_t bd_reads; /* no. of reads from backing device */
88 atomic64_t bd_writes; /* no. of writes from backing device */ 88 atomic64_t bd_writes; /* no. of writes from backing device */
89 atomic64_t bd_wb_limit; /* writeback limit of backing device */
90#endif 89#endif
91}; 90};
92 91
@@ -114,8 +113,10 @@ struct zram {
114 */ 113 */
115 bool claim; /* Protected by bdev->bd_mutex */ 114 bool claim; /* Protected by bdev->bd_mutex */
116 struct file *backing_dev; 115 struct file *backing_dev;
117 bool stop_writeback;
118#ifdef CONFIG_ZRAM_WRITEBACK 116#ifdef CONFIG_ZRAM_WRITEBACK
117 spinlock_t wb_limit_lock;
118 bool wb_limit_enable;
119 u64 bd_wb_limit;
119 struct block_device *bdev; 120 struct block_device *bdev;
120 unsigned int old_block_size; 121 unsigned int old_block_size;
121 unsigned long *bitmap; 122 unsigned long *bitmap;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f94d33525771..d299ec79e4c3 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
781 SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, 781 SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
782 SYSC_QUIRK_LEGACY_IDLE), 782 SYSC_QUIRK_LEGACY_IDLE),
783 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 783 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
784 SYSC_QUIRK_LEGACY_IDLE), 784 0),
785 /* Some timers on omap4 and later */ 785 /* Some timers on omap4 and later */
786 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, 786 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
787 SYSC_QUIRK_LEGACY_IDLE), 787 0),
788 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, 788 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
789 SYSC_QUIRK_LEGACY_IDLE), 789 0),
790 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, 790 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
791 SYSC_QUIRK_LEGACY_IDLE), 791 SYSC_QUIRK_LEGACY_IDLE),
792 /* Uarts on omap4 and later */ 792 /* Uarts on omap4 and later */
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a74ce885b541..c518659b4d9f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -32,6 +32,7 @@
32#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34#include <linux/uuid.h> 34#include <linux/uuid.h>
35#include <linux/nospec.h>
35 36
36#define IPMI_DRIVER_VERSION "39.2" 37#define IPMI_DRIVER_VERSION "39.2"
37 38
@@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
62{ } 63{ }
63#endif 64#endif
64 65
65static int initialized; 66static bool initialized;
67static bool drvregistered;
66 68
67enum ipmi_panic_event_op { 69enum ipmi_panic_event_op {
68 IPMI_SEND_PANIC_EVENT_NONE, 70 IPMI_SEND_PANIC_EVENT_NONE,
@@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
612 614
613static LIST_HEAD(ipmi_interfaces); 615static LIST_HEAD(ipmi_interfaces);
614static DEFINE_MUTEX(ipmi_interfaces_mutex); 616static DEFINE_MUTEX(ipmi_interfaces_mutex);
615DEFINE_STATIC_SRCU(ipmi_interfaces_srcu); 617struct srcu_struct ipmi_interfaces_srcu;
616 618
617/* 619/*
618 * List of watchers that want to know when smi's are added and deleted. 620 * List of watchers that want to know when smi's are added and deleted.
@@ -720,7 +722,15 @@ struct watcher_entry {
720int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) 722int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
721{ 723{
722 struct ipmi_smi *intf; 724 struct ipmi_smi *intf;
723 int index; 725 int index, rv;
726
727 /*
728 * Make sure the driver is actually initialized, this handles
729 * problems with initialization order.
730 */
731 rv = ipmi_init_msghandler();
732 if (rv)
733 return rv;
724 734
725 mutex_lock(&smi_watchers_mutex); 735 mutex_lock(&smi_watchers_mutex);
726 736
@@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
884 894
885 if (user) { 895 if (user) {
886 user->handler->ipmi_recv_hndl(msg, user->handler_data); 896 user->handler->ipmi_recv_hndl(msg, user->handler_data);
887 release_ipmi_user(msg->user, index); 897 release_ipmi_user(user, index);
888 } else { 898 } else {
889 /* User went away, give up. */ 899 /* User went away, give up. */
890 ipmi_free_recv_msg(msg); 900 ipmi_free_recv_msg(msg);
@@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int if_num,
1076{ 1086{
1077 unsigned long flags; 1087 unsigned long flags;
1078 struct ipmi_user *new_user; 1088 struct ipmi_user *new_user;
1079 int rv = 0, index; 1089 int rv, index;
1080 struct ipmi_smi *intf; 1090 struct ipmi_smi *intf;
1081 1091
1082 /* 1092 /*
@@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int if_num,
1094 * Make sure the driver is actually initialized, this handles 1104 * Make sure the driver is actually initialized, this handles
1095 * problems with initialization order. 1105 * problems with initialization order.
1096 */ 1106 */
1097 if (!initialized) { 1107 rv = ipmi_init_msghandler();
1098 rv = ipmi_init_msghandler(); 1108 if (rv)
1099 if (rv) 1109 return rv;
1100 return rv;
1101
1102 /*
1103 * The init code doesn't return an error if it was turned
1104 * off, but it won't initialize. Check that.
1105 */
1106 if (!initialized)
1107 return -ENODEV;
1108 }
1109 1110
1110 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); 1111 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
1111 if (!new_user) 1112 if (!new_user)
@@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
1183static void free_user(struct kref *ref) 1184static void free_user(struct kref *ref)
1184{ 1185{
1185 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); 1186 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1187 cleanup_srcu_struct(&user->release_barrier);
1186 kfree(user); 1188 kfree(user);
1187} 1189}
1188 1190
@@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
1259{ 1261{
1260 _ipmi_destroy_user(user); 1262 _ipmi_destroy_user(user);
1261 1263
1262 cleanup_srcu_struct(&user->release_barrier);
1263 kref_put(&user->refcount, free_user); 1264 kref_put(&user->refcount, free_user);
1264 1265
1265 return 0; 1266 return 0;
@@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
1298 if (!user) 1299 if (!user)
1299 return -ENODEV; 1300 return -ENODEV;
1300 1301
1301 if (channel >= IPMI_MAX_CHANNELS) 1302 if (channel >= IPMI_MAX_CHANNELS) {
1302 rv = -EINVAL; 1303 rv = -EINVAL;
1303 else 1304 } else {
1305 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1304 user->intf->addrinfo[channel].address = address; 1306 user->intf->addrinfo[channel].address = address;
1307 }
1305 release_ipmi_user(user, index); 1308 release_ipmi_user(user, index);
1306 1309
1307 return rv; 1310 return rv;
@@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
1318 if (!user) 1321 if (!user)
1319 return -ENODEV; 1322 return -ENODEV;
1320 1323
1321 if (channel >= IPMI_MAX_CHANNELS) 1324 if (channel >= IPMI_MAX_CHANNELS) {
1322 rv = -EINVAL; 1325 rv = -EINVAL;
1323 else 1326 } else {
1327 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1324 *address = user->intf->addrinfo[channel].address; 1328 *address = user->intf->addrinfo[channel].address;
1329 }
1325 release_ipmi_user(user, index); 1330 release_ipmi_user(user, index);
1326 1331
1327 return rv; 1332 return rv;
@@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
1338 if (!user) 1343 if (!user)
1339 return -ENODEV; 1344 return -ENODEV;
1340 1345
1341 if (channel >= IPMI_MAX_CHANNELS) 1346 if (channel >= IPMI_MAX_CHANNELS) {
1342 rv = -EINVAL; 1347 rv = -EINVAL;
1343 else 1348 } else {
1349 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1344 user->intf->addrinfo[channel].lun = LUN & 0x3; 1350 user->intf->addrinfo[channel].lun = LUN & 0x3;
1351 }
1345 release_ipmi_user(user, index); 1352 release_ipmi_user(user, index);
1346 1353
1347 return rv; 1354 return rv;
@@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
1358 if (!user) 1365 if (!user)
1359 return -ENODEV; 1366 return -ENODEV;
1360 1367
1361 if (channel >= IPMI_MAX_CHANNELS) 1368 if (channel >= IPMI_MAX_CHANNELS) {
1362 rv = -EINVAL; 1369 rv = -EINVAL;
1363 else 1370 } else {
1371 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1364 *address = user->intf->addrinfo[channel].lun; 1372 *address = user->intf->addrinfo[channel].lun;
1373 }
1365 release_ipmi_user(user, index); 1374 release_ipmi_user(user, index);
1366 1375
1367 return rv; 1376 return rv;
@@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf,
2184{ 2193{
2185 if (addr->channel >= IPMI_MAX_CHANNELS) 2194 if (addr->channel >= IPMI_MAX_CHANNELS)
2186 return -EINVAL; 2195 return -EINVAL;
2196 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2187 *lun = intf->addrinfo[addr->channel].lun; 2197 *lun = intf->addrinfo[addr->channel].lun;
2188 *saddr = intf->addrinfo[addr->channel].address; 2198 *saddr = intf->addrinfo[addr->channel].address;
2189 return 0; 2199 return 0;
@@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
3291 * Make sure the driver is actually initialized, this handles 3301 * Make sure the driver is actually initialized, this handles
3292 * problems with initialization order. 3302 * problems with initialization order.
3293 */ 3303 */
3294 if (!initialized) { 3304 rv = ipmi_init_msghandler();
3295 rv = ipmi_init_msghandler(); 3305 if (rv)
3296 if (rv) 3306 return rv;
3297 return rv;
3298 /*
3299 * The init code doesn't return an error if it was turned
3300 * off, but it won't initialize. Check that.
3301 */
3302 if (!initialized)
3303 return -ENODEV;
3304 }
3305 3307
3306 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3308 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3307 if (!intf) 3309 if (!intf)
@@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this,
5017 return NOTIFY_DONE; 5019 return NOTIFY_DONE;
5018} 5020}
5019 5021
5022/* Must be called with ipmi_interfaces_mutex held. */
5023static int ipmi_register_driver(void)
5024{
5025 int rv;
5026
5027 if (drvregistered)
5028 return 0;
5029
5030 rv = driver_register(&ipmidriver.driver);
5031 if (rv)
5032 pr_err("Could not register IPMI driver\n");
5033 else
5034 drvregistered = true;
5035 return rv;
5036}
5037
5020static struct notifier_block panic_block = { 5038static struct notifier_block panic_block = {
5021 .notifier_call = panic_event, 5039 .notifier_call = panic_event,
5022 .next = NULL, 5040 .next = NULL,
@@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void)
5027{ 5045{
5028 int rv; 5046 int rv;
5029 5047
5048 mutex_lock(&ipmi_interfaces_mutex);
5049 rv = ipmi_register_driver();
5050 if (rv)
5051 goto out;
5030 if (initialized) 5052 if (initialized)
5031 return 0; 5053 goto out;
5032
5033 rv = driver_register(&ipmidriver.driver);
5034 if (rv) {
5035 pr_err("Could not register IPMI driver\n");
5036 return rv;
5037 }
5038 5054
5039 pr_info("version " IPMI_DRIVER_VERSION "\n"); 5055 init_srcu_struct(&ipmi_interfaces_srcu);
5040 5056
5041 timer_setup(&ipmi_timer, ipmi_timeout, 0); 5057 timer_setup(&ipmi_timer, ipmi_timeout, 0);
5042 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5058 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5043 5059
5044 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 5060 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5045 5061
5046 initialized = 1; 5062 initialized = true;
5047 5063
5048 return 0; 5064out:
5065 mutex_unlock(&ipmi_interfaces_mutex);
5066 return rv;
5049} 5067}
5050 5068
5051static int __init ipmi_init_msghandler_mod(void) 5069static int __init ipmi_init_msghandler_mod(void)
5052{ 5070{
5053 ipmi_init_msghandler(); 5071 int rv;
5054 return 0; 5072
5073 pr_info("version " IPMI_DRIVER_VERSION "\n");
5074
5075 mutex_lock(&ipmi_interfaces_mutex);
5076 rv = ipmi_register_driver();
5077 mutex_unlock(&ipmi_interfaces_mutex);
5078
5079 return rv;
5055} 5080}
5056 5081
5057static void __exit cleanup_ipmi(void) 5082static void __exit cleanup_ipmi(void)
5058{ 5083{
5059 int count; 5084 int count;
5060 5085
5061 if (!initialized) 5086 if (initialized) {
5062 return; 5087 atomic_notifier_chain_unregister(&panic_notifier_list,
5063 5088 &panic_block);
5064 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
5065 5089
5066 /* 5090 /*
5067 * This can't be called if any interfaces exist, so no worry 5091 * This can't be called if any interfaces exist, so no worry
5068 * about shutting down the interfaces. 5092 * about shutting down the interfaces.
5069 */ 5093 */
5070 5094
5071 /* 5095 /*
5072 * Tell the timer to stop, then wait for it to stop. This 5096 * Tell the timer to stop, then wait for it to stop. This
5073 * avoids problems with race conditions removing the timer 5097 * avoids problems with race conditions removing the timer
5074 * here. 5098 * here.
5075 */ 5099 */
5076 atomic_inc(&stop_operation); 5100 atomic_inc(&stop_operation);
5077 del_timer_sync(&ipmi_timer); 5101 del_timer_sync(&ipmi_timer);
5078 5102
5079 driver_unregister(&ipmidriver.driver); 5103 initialized = false;
5080 5104
5081 initialized = 0; 5105 /* Check for buffer leaks. */
5106 count = atomic_read(&smi_msg_inuse_count);
5107 if (count != 0)
5108 pr_warn("SMI message count %d at exit\n", count);
5109 count = atomic_read(&recv_msg_inuse_count);
5110 if (count != 0)
5111 pr_warn("recv message count %d at exit\n", count);
5082 5112
5083 /* Check for buffer leaks. */ 5113 cleanup_srcu_struct(&ipmi_interfaces_srcu);
5084 count = atomic_read(&smi_msg_inuse_count); 5114 }
5085 if (count != 0) 5115 if (drvregistered)
5086 pr_warn("SMI message count %d at exit\n", count); 5116 driver_unregister(&ipmidriver.driver);
5087 count = atomic_read(&recv_msg_inuse_count);
5088 if (count != 0)
5089 pr_warn("recv message count %d at exit\n", count);
5090} 5117}
5091module_exit(cleanup_ipmi); 5118module_exit(cleanup_ipmi);
5092 5119
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index ca9528c4f183..b7a1ae2afaea 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
632 632
633 /* Remove the multi-part read marker. */ 633 /* Remove the multi-part read marker. */
634 len -= 2; 634 len -= 2;
635 data += 2;
635 for (i = 0; i < len; i++) 636 for (i = 0; i < len; i++)
636 ssif_info->data[i] = data[i+2]; 637 ssif_info->data[i] = data[i];
637 ssif_info->multi_len = len; 638 ssif_info->multi_len = len;
638 ssif_info->multi_pos = 1; 639 ssif_info->multi_pos = 1;
639 640
@@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
661 } 662 }
662 663
663 blocknum = data[0]; 664 blocknum = data[0];
665 len--;
666 data++;
667
668 if (blocknum != 0xff && len != 31) {
669 /* All blocks but the last must have 31 data bytes. */
670 result = -EIO;
671 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
672 pr_info("Received middle message <31\n");
664 673
665 if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { 674 goto continue_op;
675 }
676
677 if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
666 /* Received message too big, abort the operation. */ 678 /* Received message too big, abort the operation. */
667 result = -E2BIG; 679 result = -E2BIG;
668 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) 680 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
671 goto continue_op; 683 goto continue_op;
672 } 684 }
673 685
674 /* Remove the blocknum from the data. */
675 len--;
676 for (i = 0; i < len; i++) 686 for (i = 0; i < len; i++)
677 ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; 687 ssif_info->data[i + ssif_info->multi_len] = data[i];
678 ssif_info->multi_len += len; 688 ssif_info->multi_len += len;
679 if (blocknum == 0xff) { 689 if (blocknum == 0xff) {
680 /* End of read */ 690 /* End of read */
681 len = ssif_info->multi_len; 691 len = ssif_info->multi_len;
682 data = ssif_info->data; 692 data = ssif_info->data;
683 } else if (blocknum + 1 != ssif_info->multi_pos) { 693 } else if (blocknum != ssif_info->multi_pos) {
684 /* 694 /*
685 * Out of sequence block, just abort. Block 695 * Out of sequence block, just abort. Block
686 * numbers start at zero for the second block, 696 * numbers start at zero for the second block,
@@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
707 } 717 }
708 } 718 }
709 719
720 continue_op:
710 if (result < 0) { 721 if (result < 0) {
711 ssif_inc_stat(ssif_info, receive_errors); 722 ssif_inc_stat(ssif_info, receive_errors);
712 } else { 723 } else {
@@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
714 ssif_inc_stat(ssif_info, received_message_parts); 725 ssif_inc_stat(ssif_info, received_message_parts);
715 } 726 }
716 727
717
718 continue_op:
719 if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) 728 if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
720 pr_info("DONE 1: state = %d, result=%d\n", 729 pr_info("DONE 1: state = %d, result=%d\n",
721 ssif_info->ssif_state, result); 730 ssif_info->ssif_state, result);
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index b5e3103c1175..e43c876a9223 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -59,6 +59,7 @@
59#include <linux/mutex.h> 59#include <linux/mutex.h>
60#include <linux/delay.h> 60#include <linux/delay.h>
61#include <linux/serial_8250.h> 61#include <linux/serial_8250.h>
62#include <linux/nospec.h>
62#include "smapi.h" 63#include "smapi.h"
63#include "mwavedd.h" 64#include "mwavedd.h"
64#include "3780i.h" 65#include "3780i.h"
@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
289 ipcnum); 290 ipcnum);
290 return -EINVAL; 291 return -EINVAL;
291 } 292 }
293 ipcnum = array_index_nospec(ipcnum,
294 ARRAY_SIZE(pDrvData->IPCs));
292 PRINTK_3(TRACE_MWAVE, 295 PRINTK_3(TRACE_MWAVE,
293 "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" 296 "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
294 " ipcnum %x entry usIntCount %x\n", 297 " ipcnum %x entry usIntCount %x\n",
@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
317 " Invalid ipcnum %x\n", ipcnum); 320 " Invalid ipcnum %x\n", ipcnum);
318 return -EINVAL; 321 return -EINVAL;
319 } 322 }
323 ipcnum = array_index_nospec(ipcnum,
324 ARRAY_SIZE(pDrvData->IPCs));
320 PRINTK_3(TRACE_MWAVE, 325 PRINTK_3(TRACE_MWAVE,
321 "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" 326 "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
322 " ipcnum %x, usIntCount %x\n", 327 " ipcnum %x, usIntCount %x\n",
@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
383 ipcnum); 388 ipcnum);
384 return -EINVAL; 389 return -EINVAL;
385 } 390 }
391 ipcnum = array_index_nospec(ipcnum,
392 ARRAY_SIZE(pDrvData->IPCs));
386 mutex_lock(&mwave_mutex); 393 mutex_lock(&mwave_mutex);
387 if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { 394 if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
388 pDrvData->IPCs[ipcnum].bIsEnabled = false; 395 pDrvData->IPCs[ipcnum].bIsEnabled = false;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e5b2fe80eab4..e705aab9e38b 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -290,10 +290,15 @@ config COMMON_CLK_BD718XX
290 This driver supports ROHM BD71837 and ROHM BD71847 290 This driver supports ROHM BD71837 and ROHM BD71847
291 PMICs clock gates. 291 PMICs clock gates.
292 292
293config COMMON_CLK_FIXED_MMIO
294 bool "Clock driver for Memory Mapped Fixed values"
295 depends on COMMON_CLK && OF
296 help
297 Support for Memory Mapped IO Fixed clocks
298
293source "drivers/clk/actions/Kconfig" 299source "drivers/clk/actions/Kconfig"
294source "drivers/clk/bcm/Kconfig" 300source "drivers/clk/bcm/Kconfig"
295source "drivers/clk/hisilicon/Kconfig" 301source "drivers/clk/hisilicon/Kconfig"
296source "drivers/clk/imx/Kconfig"
297source "drivers/clk/imgtec/Kconfig" 302source "drivers/clk/imgtec/Kconfig"
298source "drivers/clk/imx/Kconfig" 303source "drivers/clk/imx/Kconfig"
299source "drivers/clk/ingenic/Kconfig" 304source "drivers/clk/ingenic/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 8a9440a97500..1db133652f0c 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
27obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o 27obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
28obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o 28obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
29obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o 29obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
30obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
30obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o 31obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
31obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o 32obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
32obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o 33obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
@@ -78,7 +79,7 @@ obj-$(CONFIG_ARCH_K3) += keystone/
78obj-$(CONFIG_ARCH_KEYSTONE) += keystone/ 79obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
79obj-$(CONFIG_MACH_LOONGSON32) += loongson1/ 80obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
80obj-y += mediatek/ 81obj-y += mediatek/
81obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/ 82obj-$(CONFIG_ARCH_MESON) += meson/
82obj-$(CONFIG_MACH_PIC32) += microchip/ 83obj-$(CONFIG_MACH_PIC32) += microchip/
83ifeq ($(CONFIG_COMMON_CLK), y) 84ifeq ($(CONFIG_COMMON_CLK), y)
84obj-$(CONFIG_ARCH_MMP) += mmp/ 85obj-$(CONFIG_ARCH_MMP) += mmp/
diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig
index 04f0a6355726..5b45ca35757e 100644
--- a/drivers/clk/actions/Kconfig
+++ b/drivers/clk/actions/Kconfig
@@ -9,6 +9,11 @@ if CLK_ACTIONS
9 9
10# SoC Drivers 10# SoC Drivers
11 11
12config CLK_OWL_S500
13 bool "Support for the Actions Semi OWL S500 clocks"
14 depends on ARCH_ACTIONS || COMPILE_TEST
15 default ARCH_ACTIONS
16
12config CLK_OWL_S700 17config CLK_OWL_S700
13 bool "Support for the Actions Semi OWL S700 clocks" 18 bool "Support for the Actions Semi OWL S700 clocks"
14 depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST 19 depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile
index ccfdf9781cef..a2588e55c790 100644
--- a/drivers/clk/actions/Makefile
+++ b/drivers/clk/actions/Makefile
@@ -10,5 +10,6 @@ clk-owl-y += owl-pll.o
10clk-owl-y += owl-reset.o 10clk-owl-y += owl-reset.o
11 11
12# SoC support 12# SoC support
13obj-$(CONFIG_CLK_OWL_S500) += owl-s500.o
13obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o 14obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o
14obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o 15obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
index 058e06d7099f..02437bdedf4d 100644
--- a/drivers/clk/actions/owl-pll.c
+++ b/drivers/clk/actions/owl-pll.c
@@ -179,7 +179,7 @@ static int owl_pll_set_rate(struct clk_hw *hw, unsigned long rate,
179 179
180 regmap_write(common->regmap, pll_hw->reg, reg); 180 regmap_write(common->regmap, pll_hw->reg, reg);
181 181
182 udelay(PLL_STABILITY_WAIT_US); 182 udelay(pll_hw->delay);
183 183
184 return 0; 184 return 0;
185} 185}
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h
index 0aae30abd5dc..6fb0d45bb088 100644
--- a/drivers/clk/actions/owl-pll.h
+++ b/drivers/clk/actions/owl-pll.h
@@ -13,6 +13,8 @@
13 13
14#include "owl-common.h" 14#include "owl-common.h"
15 15
16#define OWL_PLL_DEF_DELAY 50
17
16/* last entry should have rate = 0 */ 18/* last entry should have rate = 0 */
17struct clk_pll_table { 19struct clk_pll_table {
18 unsigned int val; 20 unsigned int val;
@@ -27,6 +29,7 @@ struct owl_pll_hw {
27 u8 width; 29 u8 width;
28 u8 min_mul; 30 u8 min_mul;
29 u8 max_mul; 31 u8 max_mul;
32 u8 delay;
30 const struct clk_pll_table *table; 33 const struct clk_pll_table *table;
31}; 34};
32 35
@@ -36,7 +39,7 @@ struct owl_pll {
36}; 39};
37 40
38#define OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ 41#define OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
39 _width, _min_mul, _max_mul, _table) \ 42 _width, _min_mul, _max_mul, _delay, _table) \
40 { \ 43 { \
41 .reg = _reg, \ 44 .reg = _reg, \
42 .bfreq = _bfreq, \ 45 .bfreq = _bfreq, \
@@ -45,6 +48,7 @@ struct owl_pll {
45 .width = _width, \ 48 .width = _width, \
46 .min_mul = _min_mul, \ 49 .min_mul = _min_mul, \
47 .max_mul = _max_mul, \ 50 .max_mul = _max_mul, \
51 .delay = _delay, \
48 .table = _table, \ 52 .table = _table, \
49 } 53 }
50 54
@@ -52,8 +56,8 @@ struct owl_pll {
52 _shift, _width, _min_mul, _max_mul, _table, _flags) \ 56 _shift, _width, _min_mul, _max_mul, _table, _flags) \
53 struct owl_pll _struct = { \ 57 struct owl_pll _struct = { \
54 .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ 58 .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
55 _width, _min_mul, \ 59 _width, _min_mul, _max_mul, \
56 _max_mul, _table), \ 60 OWL_PLL_DEF_DELAY, _table), \
57 .common = { \ 61 .common = { \
58 .regmap = NULL, \ 62 .regmap = NULL, \
59 .hw.init = CLK_HW_INIT(_name, \ 63 .hw.init = CLK_HW_INIT(_name, \
@@ -67,8 +71,23 @@ struct owl_pll {
67 _shift, _width, _min_mul, _max_mul, _table, _flags) \ 71 _shift, _width, _min_mul, _max_mul, _table, _flags) \
68 struct owl_pll _struct = { \ 72 struct owl_pll _struct = { \
69 .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ 73 .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
70 _width, _min_mul, \ 74 _width, _min_mul, _max_mul, \
71 _max_mul, _table), \ 75 OWL_PLL_DEF_DELAY, _table), \
76 .common = { \
77 .regmap = NULL, \
78 .hw.init = CLK_HW_INIT_NO_PARENT(_name, \
79 &owl_pll_ops, \
80 _flags), \
81 }, \
82 }
83
84#define OWL_PLL_NO_PARENT_DELAY(_struct, _name, _reg, _bfreq, _bit_idx, \
85 _shift, _width, _min_mul, _max_mul, _delay, _table, \
86 _flags) \
87 struct owl_pll _struct = { \
88 .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
89 _width, _min_mul, _max_mul, \
90 _delay, _table), \
72 .common = { \ 91 .common = { \
73 .regmap = NULL, \ 92 .regmap = NULL, \
74 .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ 93 .hw.init = CLK_HW_INIT_NO_PARENT(_name, \
@@ -78,7 +97,6 @@ struct owl_pll {
78 } 97 }
79 98
80#define mul_mask(m) ((1 << ((m)->width)) - 1) 99#define mul_mask(m) ((1 << ((m)->width)) - 1)
81#define PLL_STABILITY_WAIT_US (50)
82 100
83static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw) 101static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw)
84{ 102{
diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
new file mode 100644
index 000000000000..e2007ac4d235
--- /dev/null
+++ b/drivers/clk/actions/owl-s500.c
@@ -0,0 +1,525 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Actions Semi Owl S500 SoC clock driver
4 *
5 * Copyright (c) 2014 Actions Semi Inc.
6 * Author: David Liu <liuwei@actions-semi.com>
7 *
8 * Copyright (c) 2018 Linaro Ltd.
9 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
10 *
11 * Copyright (c) 2018 LSI-TEC - Caninos Loucos
12 * Author: Edgar Bernardi Righi <edgar.righi@lsitec.org.br>
13 */
14
15#include <linux/clk-provider.h>
16#include <linux/platform_device.h>
17
18#include "owl-common.h"
19#include "owl-composite.h"
20#include "owl-divider.h"
21#include "owl-factor.h"
22#include "owl-fixed-factor.h"
23#include "owl-gate.h"
24#include "owl-mux.h"
25#include "owl-pll.h"
26
27#include <dt-bindings/clock/actions,s500-cmu.h>
28
29#define CMU_COREPLL (0x0000)
30#define CMU_DEVPLL (0x0004)
31#define CMU_DDRPLL (0x0008)
32#define CMU_NANDPLL (0x000C)
33#define CMU_DISPLAYPLL (0x0010)
34#define CMU_AUDIOPLL (0x0014)
35#define CMU_TVOUTPLL (0x0018)
36#define CMU_BUSCLK (0x001C)
37#define CMU_SENSORCLK (0x0020)
38#define CMU_LCDCLK (0x0024)
39#define CMU_DSICLK (0x0028)
40#define CMU_CSICLK (0x002C)
41#define CMU_DECLK (0x0030)
42#define CMU_BISPCLK (0x0034)
43#define CMU_BUSCLK1 (0x0038)
44#define CMU_VDECLK (0x0040)
45#define CMU_VCECLK (0x0044)
46#define CMU_NANDCCLK (0x004C)
47#define CMU_SD0CLK (0x0050)
48#define CMU_SD1CLK (0x0054)
49#define CMU_SD2CLK (0x0058)
50#define CMU_UART0CLK (0x005C)
51#define CMU_UART1CLK (0x0060)
52#define CMU_UART2CLK (0x0064)
53#define CMU_PWM4CLK (0x0068)
54#define CMU_PWM5CLK (0x006C)
55#define CMU_PWM0CLK (0x0070)
56#define CMU_PWM1CLK (0x0074)
57#define CMU_PWM2CLK (0x0078)
58#define CMU_PWM3CLK (0x007C)
59#define CMU_USBPLL (0x0080)
60#define CMU_ETHERNETPLL (0x0084)
61#define CMU_CVBSPLL (0x0088)
62#define CMU_LENSCLK (0x008C)
63#define CMU_GPU3DCLK (0x0090)
64#define CMU_CORECTL (0x009C)
65#define CMU_DEVCLKEN0 (0x00A0)
66#define CMU_DEVCLKEN1 (0x00A4)
67#define CMU_DEVRST0 (0x00A8)
68#define CMU_DEVRST1 (0x00AC)
69#define CMU_UART3CLK (0x00B0)
70#define CMU_UART4CLK (0x00B4)
71#define CMU_UART5CLK (0x00B8)
72#define CMU_UART6CLK (0x00BC)
73#define CMU_SSCLK (0x00C0)
74#define CMU_DIGITALDEBUG (0x00D0)
75#define CMU_ANALOGDEBUG (0x00D4)
76#define CMU_COREPLLDEBUG (0x00D8)
77#define CMU_DEVPLLDEBUG (0x00DC)
78#define CMU_DDRPLLDEBUG (0x00E0)
79#define CMU_NANDPLLDEBUG (0x00E4)
80#define CMU_DISPLAYPLLDEBUG (0x00E8)
81#define CMU_TVOUTPLLDEBUG (0x00EC)
82#define CMU_DEEPCOLORPLLDEBUG (0x00F4)
83#define CMU_AUDIOPLL_ETHPLLDEBUG (0x00F8)
84#define CMU_CVBSPLLDEBUG (0x00FC)
85
86#define OWL_S500_COREPLL_DELAY (150)
87#define OWL_S500_DDRPLL_DELAY (63)
88#define OWL_S500_DEVPLL_DELAY (28)
89#define OWL_S500_NANDPLL_DELAY (44)
90#define OWL_S500_DISPLAYPLL_DELAY (57)
91#define OWL_S500_ETHERNETPLL_DELAY (25)
92#define OWL_S500_AUDIOPLL_DELAY (100)
93
94static const struct clk_pll_table clk_audio_pll_table[] = {
95 { 0, 45158400 }, { 1, 49152000 },
96 { 0, 0 },
97};
98
99/* pll clocks */
100static OWL_PLL_NO_PARENT_DELAY(ethernet_pll_clk, "ethernet_pll_clk", CMU_ETHERNETPLL, 500000000, 0, 0, 0, 0, 0, OWL_S500_ETHERNETPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
101static OWL_PLL_NO_PARENT_DELAY(core_pll_clk, "core_pll_clk", CMU_COREPLL, 12000000, 9, 0, 8, 4, 134, OWL_S500_COREPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
102static OWL_PLL_NO_PARENT_DELAY(ddr_pll_clk, "ddr_pll_clk", CMU_DDRPLL, 12000000, 8, 0, 8, 1, 67, OWL_S500_DDRPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
103static OWL_PLL_NO_PARENT_DELAY(nand_pll_clk, "nand_pll_clk", CMU_NANDPLL, 6000000, 8, 0, 7, 2, 86, OWL_S500_NANDPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
104static OWL_PLL_NO_PARENT_DELAY(display_pll_clk, "display_pll_clk", CMU_DISPLAYPLL, 6000000, 8, 0, 8, 2, 126, OWL_S500_DISPLAYPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
105static OWL_PLL_NO_PARENT_DELAY(dev_pll_clk, "dev_pll_clk", CMU_DEVPLL, 6000000, 8, 0, 7, 8, 126, OWL_S500_DEVPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
106static OWL_PLL_NO_PARENT_DELAY(audio_pll_clk, "audio_pll_clk", CMU_AUDIOPLL, 0, 4, 0, 1, 0, 0, OWL_S500_AUDIOPLL_DELAY, clk_audio_pll_table, CLK_IGNORE_UNUSED);
107
108static const char * const dev_clk_mux_p[] = { "hosc", "dev_pll_clk" };
109static const char * const bisp_clk_mux_p[] = { "display_pll_clk", "dev_clk" };
110static const char * const sensor_clk_mux_p[] = { "hosc", "bisp_clk" };
111static const char * const sd_clk_mux_p[] = { "dev_clk", "nand_pll_clk" };
112static const char * const pwm_clk_mux_p[] = { "losc", "hosc" };
113static const char * const ahbprediv_clk_mux_p[] = { "dev_clk", "display_pll_clk", "nand_pll_clk", "ddr_pll_clk" };
114static const char * const uart_clk_mux_p[] = { "hosc", "dev_pll_clk" };
115static const char * const de_clk_mux_p[] = { "display_pll_clk", "dev_clk" };
116static const char * const i2s_clk_mux_p[] = { "audio_pll_clk" };
117static const char * const hde_clk_mux_p[] = { "dev_clk", "display_pll_clk", "nand_pll_clk", "ddr_pll_clk" };
118static const char * const nand_clk_mux_p[] = { "nand_pll_clk", "display_pll_clk", "dev_clk", "ddr_pll_clk" };
119
120static struct clk_factor_table sd_factor_table[] = {
121 /* bit0 ~ 4 */
122 { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
123 { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
124 { 8, 1, 9 }, { 9, 1, 10 }, { 10, 1, 11 }, { 11, 1, 12 },
125 { 12, 1, 13 }, { 13, 1, 14 }, { 14, 1, 15 }, { 15, 1, 16 },
126 { 16, 1, 17 }, { 17, 1, 18 }, { 18, 1, 19 }, { 19, 1, 20 },
127 { 20, 1, 21 }, { 21, 1, 22 }, { 22, 1, 23 }, { 23, 1, 24 },
128 { 24, 1, 25 }, { 25, 1, 26 }, { 26, 1, 27 }, { 27, 1, 28 },
129 { 28, 1, 29 }, { 29, 1, 30 }, { 30, 1, 31 }, { 31, 1, 32 },
130
131 /* bit8: /128 */
132 { 256, 1, 1 * 128 }, { 257, 1, 2 * 128 }, { 258, 1, 3 * 128 }, { 259, 1, 4 * 128 },
133 { 260, 1, 5 * 128 }, { 261, 1, 6 * 128 }, { 262, 1, 7 * 128 }, { 263, 1, 8 * 128 },
134 { 264, 1, 9 * 128 }, { 265, 1, 10 * 128 }, { 266, 1, 11 * 128 }, { 267, 1, 12 * 128 },
135 { 268, 1, 13 * 128 }, { 269, 1, 14 * 128 }, { 270, 1, 15 * 128 }, { 271, 1, 16 * 128 },
136 { 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 },
137 { 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 },
138 { 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 },
139 { 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 },
140 { 0, 0, 0 },
141};
142
143static struct clk_factor_table bisp_factor_table[] = {
144 { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
145 { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
146 { 0, 0, 0 },
147};
148
149static struct clk_factor_table ahb_factor_table[] = {
150 { 1, 1, 2 }, { 2, 1, 3 },
151 { 0, 0, 0 },
152};
153
154static struct clk_div_table rmii_ref_div_table[] = {
155 { 0, 4 }, { 1, 10 },
156 { 0, 0 },
157};
158
159static struct clk_div_table i2s_div_table[] = {
160 { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
161 { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
162 { 8, 24 },
163 { 0, 0 },
164};
165
166static struct clk_div_table nand_div_table[] = {
167 { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 6 },
168 { 4, 8 }, { 5, 10 }, { 6, 12 }, { 7, 14 },
169 { 8, 16 }, { 9, 18 }, { 10, 20 }, { 11, 22 },
170 { 0, 0 },
171};
172
173/* mux clock */
174static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
175static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
176
177/* gate clocks */
178static OWL_GATE(spi0_clk, "spi0_clk", "ahb_clk", CMU_DEVCLKEN1, 10, 0, CLK_IGNORE_UNUSED);
179static OWL_GATE(spi1_clk, "spi1_clk", "ahb_clk", CMU_DEVCLKEN1, 11, 0, CLK_IGNORE_UNUSED);
180static OWL_GATE(spi2_clk, "spi2_clk", "ahb_clk", CMU_DEVCLKEN1, 12, 0, CLK_IGNORE_UNUSED);
181static OWL_GATE(spi3_clk, "spi3_clk", "ahb_clk", CMU_DEVCLKEN1, 13, 0, CLK_IGNORE_UNUSED);
182static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
183static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
184
185/* divider clocks */
186static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
187static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
188
189/* factor clocks */
190static OWL_FACTOR(ahb_clk, "ahb_clk", "h_clk", CMU_BUSCLK1, 2, 2, ahb_factor_table, 0, 0);
191static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 3, bisp_factor_table, 0, 0);
192static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 3, bisp_factor_table, 0, 0);
193
194/* composite clocks */
195static OWL_COMP_FACTOR(vce_clk, "vce_clk", hde_clk_mux_p,
196 OWL_MUX_HW(CMU_VCECLK, 4, 2),
197 OWL_GATE_HW(CMU_DEVCLKEN0, 26, 0),
198 OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, bisp_factor_table),
199 0);
200
201static OWL_COMP_FACTOR(vde_clk, "vde_clk", hde_clk_mux_p,
202 OWL_MUX_HW(CMU_VDECLK, 4, 2),
203 OWL_GATE_HW(CMU_DEVCLKEN0, 25, 0),
204 OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, bisp_factor_table),
205 0);
206
207static OWL_COMP_FACTOR(bisp_clk, "bisp_clk", bisp_clk_mux_p,
208 OWL_MUX_HW(CMU_BISPCLK, 4, 1),
209 OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
210 OWL_FACTOR_HW(CMU_BISPCLK, 0, 3, 0, bisp_factor_table),
211 0);
212
213static OWL_COMP_FACTOR(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
214 OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
215 OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
216 OWL_FACTOR_HW(CMU_SENSORCLK, 0, 3, 0, bisp_factor_table),
217 CLK_IGNORE_UNUSED);
218
219static OWL_COMP_FACTOR(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
220 OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
221 OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
222 OWL_FACTOR_HW(CMU_SENSORCLK, 8, 3, 0, bisp_factor_table),
223 CLK_IGNORE_UNUSED);
224
225static OWL_COMP_FACTOR(sd0_clk, "sd0_clk", sd_clk_mux_p,
226 OWL_MUX_HW(CMU_SD0CLK, 9, 1),
227 OWL_GATE_HW(CMU_DEVCLKEN0, 5, 0),
228 OWL_FACTOR_HW(CMU_SD0CLK, 0, 9, 0, sd_factor_table),
229 0);
230
231static OWL_COMP_FACTOR(sd1_clk, "sd1_clk", sd_clk_mux_p,
232 OWL_MUX_HW(CMU_SD1CLK, 9, 1),
233 OWL_GATE_HW(CMU_DEVCLKEN0, 6, 0),
234 OWL_FACTOR_HW(CMU_SD1CLK, 0, 9, 0, sd_factor_table),
235 0);
236
237static OWL_COMP_FACTOR(sd2_clk, "sd2_clk", sd_clk_mux_p,
238 OWL_MUX_HW(CMU_SD2CLK, 9, 1),
239 OWL_GATE_HW(CMU_DEVCLKEN0, 7, 0),
240 OWL_FACTOR_HW(CMU_SD2CLK, 0, 9, 0, sd_factor_table),
241 0);
242
243static OWL_COMP_DIV(pwm0_clk, "pwm0_clk", pwm_clk_mux_p,
244 OWL_MUX_HW(CMU_PWM0CLK, 12, 1),
245 OWL_GATE_HW(CMU_DEVCLKEN1, 23, 0),
246 OWL_DIVIDER_HW(CMU_PWM0CLK, 0, 10, 0, NULL),
247 0);
248
249static OWL_COMP_DIV(pwm1_clk, "pwm1_clk", pwm_clk_mux_p,
250 OWL_MUX_HW(CMU_PWM1CLK, 12, 1),
251 OWL_GATE_HW(CMU_DEVCLKEN1, 24, 0),
252 OWL_DIVIDER_HW(CMU_PWM1CLK, 0, 10, 0, NULL),
253 0);
254
255static OWL_COMP_DIV(pwm2_clk, "pwm2_clk", pwm_clk_mux_p,
256 OWL_MUX_HW(CMU_PWM2CLK, 12, 1),
257 OWL_GATE_HW(CMU_DEVCLKEN1, 25, 0),
258 OWL_DIVIDER_HW(CMU_PWM2CLK, 0, 10, 0, NULL),
259 0);
260
261static OWL_COMP_DIV(pwm3_clk, "pwm3_clk", pwm_clk_mux_p,
262 OWL_MUX_HW(CMU_PWM3CLK, 12, 1),
263 OWL_GATE_HW(CMU_DEVCLKEN1, 26, 0),
264 OWL_DIVIDER_HW(CMU_PWM3CLK, 0, 10, 0, NULL),
265 0);
266
267static OWL_COMP_DIV(pwm4_clk, "pwm4_clk", pwm_clk_mux_p,
268 OWL_MUX_HW(CMU_PWM4CLK, 12, 1),
269 OWL_GATE_HW(CMU_DEVCLKEN0, 11, 0),
270 OWL_DIVIDER_HW(CMU_PWM4CLK, 0, 10, 0, NULL),
271 0);
272
273static OWL_COMP_DIV(pwm5_clk, "pwm5_clk", pwm_clk_mux_p,
274 OWL_MUX_HW(CMU_PWM5CLK, 12, 1),
275 OWL_GATE_HW(CMU_DEVCLKEN0, 0, 0),
276 OWL_DIVIDER_HW(CMU_PWM5CLK, 0, 10, 0, NULL),
277 0);
278
279static OWL_COMP_PASS(de_clk, "de_clk", de_clk_mux_p,
280 OWL_MUX_HW(CMU_DECLK, 12, 1),
281 OWL_GATE_HW(CMU_DEVCLKEN0, 8, 0),
282 0);
283
284static OWL_COMP_FIXED_FACTOR(i2c0_clk, "i2c0_clk", "ethernet_pll_clk",
285 OWL_GATE_HW(CMU_DEVCLKEN1, 14, 0),
286 1, 5, 0);
287
288static OWL_COMP_FIXED_FACTOR(i2c1_clk, "i2c1_clk", "ethernet_pll_clk",
289 OWL_GATE_HW(CMU_DEVCLKEN1, 15, 0),
290 1, 5, 0);
291
292static OWL_COMP_FIXED_FACTOR(i2c2_clk, "i2c2_clk", "ethernet_pll_clk",
293 OWL_GATE_HW(CMU_DEVCLKEN1, 30, 0),
294 1, 5, 0);
295
296static OWL_COMP_FIXED_FACTOR(i2c3_clk, "i2c3_clk", "ethernet_pll_clk",
297 OWL_GATE_HW(CMU_DEVCLKEN1, 31, 0),
298 1, 5, 0);
299
300static OWL_COMP_DIV(uart0_clk, "uart0_clk", uart_clk_mux_p,
301 OWL_MUX_HW(CMU_UART0CLK, 16, 1),
302 OWL_GATE_HW(CMU_DEVCLKEN1, 6, 0),
303 OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
304 CLK_IGNORE_UNUSED);
305
306static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
307 OWL_MUX_HW(CMU_UART1CLK, 16, 1),
308 OWL_GATE_HW(CMU_DEVCLKEN1, 7, 0),
309 OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
310 CLK_IGNORE_UNUSED);
311
312static OWL_COMP_DIV(uart2_clk, "uart2_clk", uart_clk_mux_p,
313 OWL_MUX_HW(CMU_UART2CLK, 16, 1),
314 OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
315 OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
316 CLK_IGNORE_UNUSED);
317
318static OWL_COMP_DIV(uart3_clk, "uart3_clk", uart_clk_mux_p,
319 OWL_MUX_HW(CMU_UART3CLK, 16, 1),
320 OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
321 OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
322 CLK_IGNORE_UNUSED);
323
324static OWL_COMP_DIV(uart4_clk, "uart4_clk", uart_clk_mux_p,
325 OWL_MUX_HW(CMU_UART4CLK, 16, 1),
326 OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
327 OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
328 CLK_IGNORE_UNUSED);
329
330static OWL_COMP_DIV(uart5_clk, "uart5_clk", uart_clk_mux_p,
331 OWL_MUX_HW(CMU_UART5CLK, 16, 1),
332 OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
333 OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
334 CLK_IGNORE_UNUSED);
335
336static OWL_COMP_DIV(uart6_clk, "uart6_clk", uart_clk_mux_p,
337 OWL_MUX_HW(CMU_UART6CLK, 16, 1),
338 OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
339 OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
340 CLK_IGNORE_UNUSED);
341
342static OWL_COMP_DIV(i2srx_clk, "i2srx_clk", i2s_clk_mux_p,
343 OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
344 OWL_GATE_HW(CMU_DEVCLKEN0, 21, 0),
345 OWL_DIVIDER_HW(CMU_AUDIOPLL, 20, 4, 0, i2s_div_table),
346 0);
347
348static OWL_COMP_DIV(i2stx_clk, "i2stx_clk", i2s_clk_mux_p,
349 OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
350 OWL_GATE_HW(CMU_DEVCLKEN0, 20, 0),
351 OWL_DIVIDER_HW(CMU_AUDIOPLL, 16, 4, 0, i2s_div_table),
352 0);
353
354static OWL_COMP_DIV(hdmia_clk, "hdmia_clk", i2s_clk_mux_p,
355 OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
356 OWL_GATE_HW(CMU_DEVCLKEN0, 22, 0),
357 OWL_DIVIDER_HW(CMU_AUDIOPLL, 24, 4, 0, i2s_div_table),
358 0);
359
360static OWL_COMP_DIV(spdif_clk, "spdif_clk", i2s_clk_mux_p,
361 OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
362 OWL_GATE_HW(CMU_DEVCLKEN0, 23, 0),
363 OWL_DIVIDER_HW(CMU_AUDIOPLL, 28, 4, 0, i2s_div_table),
364 0);
365
366static OWL_COMP_DIV(nand_clk, "nand_clk", nand_clk_mux_p,
367 OWL_MUX_HW(CMU_NANDCCLK, 8, 2),
368 OWL_GATE_HW(CMU_DEVCLKEN0, 4, 0),
369 OWL_DIVIDER_HW(CMU_NANDCCLK, 0, 3, 0, nand_div_table),
370 CLK_SET_RATE_PARENT);
371
372static OWL_COMP_DIV(ecc_clk, "ecc_clk", nand_clk_mux_p,
373 OWL_MUX_HW(CMU_NANDCCLK, 8, 2),
374 OWL_GATE_HW(CMU_DEVCLKEN0, 4, 0),
375 OWL_DIVIDER_HW(CMU_NANDCCLK, 4, 3, 0, nand_div_table),
376 CLK_SET_RATE_PARENT);
377
378static struct owl_clk_common *s500_clks[] = {
379 &ethernet_pll_clk.common,
380 &core_pll_clk.common,
381 &ddr_pll_clk.common,
382 &dev_pll_clk.common,
383 &nand_pll_clk.common,
384 &audio_pll_clk.common,
385 &display_pll_clk.common,
386 &dev_clk.common,
387 &timer_clk.common,
388 &i2c0_clk.common,
389 &i2c1_clk.common,
390 &i2c2_clk.common,
391 &i2c3_clk.common,
392 &uart0_clk.common,
393 &uart1_clk.common,
394 &uart2_clk.common,
395 &uart3_clk.common,
396 &uart4_clk.common,
397 &uart5_clk.common,
398 &uart6_clk.common,
399 &pwm0_clk.common,
400 &pwm1_clk.common,
401 &pwm2_clk.common,
402 &pwm3_clk.common,
403 &pwm4_clk.common,
404 &pwm5_clk.common,
405 &sensor0_clk.common,
406 &sensor1_clk.common,
407 &sd0_clk.common,
408 &sd1_clk.common,
409 &sd2_clk.common,
410 &bisp_clk.common,
411 &ahb_clk.common,
412 &ahbprediv_clk.common,
413 &h_clk.common,
414 &spi0_clk.common,
415 &spi1_clk.common,
416 &spi2_clk.common,
417 &spi3_clk.common,
418 &rmii_ref_clk.common,
419 &de_clk.common,
420 &de1_clk.common,
421 &de2_clk.common,
422 &i2srx_clk.common,
423 &i2stx_clk.common,
424 &hdmia_clk.common,
425 &hdmi_clk.common,
426 &vce_clk.common,
427 &vde_clk.common,
428 &spdif_clk.common,
429 &nand_clk.common,
430 &ecc_clk.common,
431};
432
433static struct clk_hw_onecell_data s500_hw_clks = {
434 .hws = {
435 [CLK_ETHERNET_PLL] = &ethernet_pll_clk.common.hw,
436 [CLK_CORE_PLL] = &core_pll_clk.common.hw,
437 [CLK_DDR_PLL] = &ddr_pll_clk.common.hw,
438 [CLK_NAND_PLL] = &nand_pll_clk.common.hw,
439 [CLK_DISPLAY_PLL] = &display_pll_clk.common.hw,
440 [CLK_DEV_PLL] = &dev_pll_clk.common.hw,
441 [CLK_AUDIO_PLL] = &audio_pll_clk.common.hw,
442 [CLK_TIMER] = &timer_clk.common.hw,
443 [CLK_DEV] = &dev_clk.common.hw,
444 [CLK_DE] = &de_clk.common.hw,
445 [CLK_DE1] = &de1_clk.common.hw,
446 [CLK_DE2] = &de2_clk.common.hw,
447 [CLK_I2C0] = &i2c0_clk.common.hw,
448 [CLK_I2C1] = &i2c1_clk.common.hw,
449 [CLK_I2C2] = &i2c2_clk.common.hw,
450 [CLK_I2C3] = &i2c3_clk.common.hw,
451 [CLK_I2SRX] = &i2srx_clk.common.hw,
452 [CLK_I2STX] = &i2stx_clk.common.hw,
453 [CLK_UART0] = &uart0_clk.common.hw,
454 [CLK_UART1] = &uart1_clk.common.hw,
455 [CLK_UART2] = &uart2_clk.common.hw,
456 [CLK_UART3] = &uart3_clk.common.hw,
457 [CLK_UART4] = &uart4_clk.common.hw,
458 [CLK_UART5] = &uart5_clk.common.hw,
459 [CLK_UART6] = &uart6_clk.common.hw,
460 [CLK_PWM0] = &pwm0_clk.common.hw,
461 [CLK_PWM1] = &pwm1_clk.common.hw,
462 [CLK_PWM2] = &pwm2_clk.common.hw,
463 [CLK_PWM3] = &pwm3_clk.common.hw,
464 [CLK_PWM4] = &pwm4_clk.common.hw,
465 [CLK_PWM5] = &pwm5_clk.common.hw,
466 [CLK_SENSOR0] = &sensor0_clk.common.hw,
467 [CLK_SENSOR1] = &sensor1_clk.common.hw,
468 [CLK_SD0] = &sd0_clk.common.hw,
469 [CLK_SD1] = &sd1_clk.common.hw,
470 [CLK_SD2] = &sd2_clk.common.hw,
471 [CLK_BISP] = &bisp_clk.common.hw,
472 [CLK_SPI0] = &spi0_clk.common.hw,
473 [CLK_SPI1] = &spi1_clk.common.hw,
474 [CLK_SPI2] = &spi2_clk.common.hw,
475 [CLK_SPI3] = &spi3_clk.common.hw,
476 [CLK_AHB] = &ahb_clk.common.hw,
477 [CLK_H] = &h_clk.common.hw,
478 [CLK_AHBPREDIV] = &ahbprediv_clk.common.hw,
479 [CLK_RMII_REF] = &rmii_ref_clk.common.hw,
480 [CLK_HDMI_AUDIO] = &hdmia_clk.common.hw,
481 [CLK_HDMI] = &hdmi_clk.common.hw,
482 [CLK_VDE] = &vde_clk.common.hw,
483 [CLK_VCE] = &vce_clk.common.hw,
484 [CLK_SPDIF] = &spdif_clk.common.hw,
485 [CLK_NAND] = &nand_clk.common.hw,
486 [CLK_ECC] = &ecc_clk.common.hw,
487 },
488 .num = CLK_NR_CLKS,
489};
490
491static struct owl_clk_desc s500_clk_desc = {
492 .clks = s500_clks,
493 .num_clks = ARRAY_SIZE(s500_clks),
494
495 .hw_clks = &s500_hw_clks,
496};
497
498static int s500_clk_probe(struct platform_device *pdev)
499{
500 struct owl_clk_desc *desc;
501
502 desc = &s500_clk_desc;
503 owl_clk_regmap_init(pdev, desc);
504
505 return owl_clk_probe(&pdev->dev, desc->hw_clks);
506}
507
508static const struct of_device_id s500_clk_of_match[] = {
509 { .compatible = "actions,s500-cmu", },
510 { /* sentinel */ }
511};
512
513static struct platform_driver s500_clk_driver = {
514 .probe = s500_clk_probe,
515 .driver = {
516 .name = "s500-cmu",
517 .of_match_table = s500_clk_of_match,
518 },
519};
520
521static int __init s500_clk_init(void)
522{
523 return platform_driver_register(&s500_clk_driver);
524}
525core_initcall(s500_clk_init);
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 2fe225a697df..3487e03d4bc6 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
144 return; 144 return;
145 145
146 at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, 146 at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
147 nck(at91sam9x5_systemck), 147 nck(at91sam9x5_systemck), 31, 0);
148 nck(at91sam9x35_periphck), 0);
149 if (!at91sam9x5_pmc) 148 if (!at91sam9x5_pmc)
150 return; 149 return;
151 150
@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
210 parent_names[1] = "mainck"; 209 parent_names[1] = "mainck";
211 parent_names[2] = "plladivck"; 210 parent_names[2] = "plladivck";
212 parent_names[3] = "utmick"; 211 parent_names[3] = "utmick";
213 parent_names[4] = "mck"; 212 parent_names[4] = "masterck";
214 for (i = 0; i < 2; i++) { 213 for (i = 0; i < 2; i++) {
215 char name[6]; 214 char name[6];
216 215
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index d69ad96fe988..cd0ef7274fdb 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
240 parent_names[1] = "mainck"; 240 parent_names[1] = "mainck";
241 parent_names[2] = "plladivck"; 241 parent_names[2] = "plladivck";
242 parent_names[3] = "utmick"; 242 parent_names[3] = "utmick";
243 parent_names[4] = "mck"; 243 parent_names[4] = "masterck";
244 for (i = 0; i < 3; i++) { 244 for (i = 0; i < 3; i++) {
245 char name[6]; 245 char name[6];
246 246
@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
291 parent_names[1] = "mainck"; 291 parent_names[1] = "mainck";
292 parent_names[2] = "plladivck"; 292 parent_names[2] = "plladivck";
293 parent_names[3] = "utmick"; 293 parent_names[3] = "utmick";
294 parent_names[4] = "mck"; 294 parent_names[4] = "masterck";
295 parent_names[5] = "audiopll_pmcck"; 295 parent_names[5] = "audiopll_pmcck";
296 for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) { 296 for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
297 hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, 297 hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index e358be7f6c8d..b645a9d59cdb 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
207 parent_names[1] = "mainck"; 207 parent_names[1] = "mainck";
208 parent_names[2] = "plladivck"; 208 parent_names[2] = "plladivck";
209 parent_names[3] = "utmick"; 209 parent_names[3] = "utmick";
210 parent_names[4] = "mck"; 210 parent_names[4] = "masterck";
211 for (i = 0; i < 3; i++) { 211 for (i = 0; i < 3; i++) {
212 char name[6]; 212 char name[6];
213 213
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index 2c04396402ab..c36c47bdba02 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -44,21 +44,21 @@ struct clps711x_clk {
44 struct clk_hw_onecell_data clk_data; 44 struct clk_hw_onecell_data clk_data;
45}; 45};
46 46
47static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base, 47static void __init clps711x_clk_init_dt(struct device_node *np)
48 u32 fref)
49{ 48{
50 u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi; 49 u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi, fref = 0;
51 struct clps711x_clk *clps711x_clk; 50 struct clps711x_clk *clps711x_clk;
52 unsigned i; 51 void __iomem *base;
52
53 WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
53 54
54 if (!base) 55 base = of_iomap(np, 0);
55 return ERR_PTR(-ENOMEM); 56 BUG_ON(!base);
56 57
57 clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws, 58 clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws,
58 CLPS711X_CLK_MAX), 59 CLPS711X_CLK_MAX),
59 GFP_KERNEL); 60 GFP_KERNEL);
60 if (!clps711x_clk) 61 BUG_ON(!clps711x_clk);
61 return ERR_PTR(-ENOMEM);
62 62
63 spin_lock_init(&clps711x_clk->lock); 63 spin_lock_init(&clps711x_clk->lock);
64 64
@@ -137,52 +137,13 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
137 clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10); 137 clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
138 clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] = 138 clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] =
139 clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64); 139 clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64);
140 for (i = 0; i < CLPS711X_CLK_MAX; i++) 140 for (tmp = 0; tmp < CLPS711X_CLK_MAX; tmp++)
141 if (IS_ERR(clps711x_clk->clk_data.hws[i])) 141 if (IS_ERR(clps711x_clk->clk_data.hws[tmp]))
142 pr_err("clk %i: register failed with %ld\n", 142 pr_err("clk %i: register failed with %ld\n",
143 i, PTR_ERR(clps711x_clk->clk_data.hws[i])); 143 tmp, PTR_ERR(clps711x_clk->clk_data.hws[tmp]));
144
145 return clps711x_clk;
146}
147
148void __init clps711x_clk_init(void __iomem *base)
149{
150 struct clps711x_clk *clps711x_clk;
151
152 clps711x_clk = _clps711x_clk_init(base, 73728000);
153
154 BUG_ON(IS_ERR(clps711x_clk));
155
156 /* Clocksource */
157 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1],
158 NULL, "clps711x-timer.0");
159 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2],
160 NULL, "clps711x-timer.1");
161
162 /* Drivers */
163 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM],
164 NULL, "clps711x-pwm");
165 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
166 NULL, "clps711x-uart.0");
167 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
168 NULL, "clps711x-uart.1");
169}
170
171#ifdef CONFIG_OF
172static void __init clps711x_clk_init_dt(struct device_node *np)
173{
174 void __iomem *base = of_iomap(np, 0);
175 struct clps711x_clk *clps711x_clk;
176 u32 fref = 0;
177
178 WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
179
180 clps711x_clk = _clps711x_clk_init(base, fref);
181 BUG_ON(IS_ERR(clps711x_clk));
182 144
183 clps711x_clk->clk_data.num = CLPS711X_CLK_MAX; 145 clps711x_clk->clk_data.num = CLPS711X_CLK_MAX;
184 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, 146 of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
185 &clps711x_clk->clk_data); 147 &clps711x_clk->clk_data);
186} 148}
187CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt); 149CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt);
188#endif
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index c9a86156ced8..daa1fc8fba53 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -29,6 +29,17 @@ struct clk *devm_clk_get(struct device *dev, const char *id)
29} 29}
30EXPORT_SYMBOL(devm_clk_get); 30EXPORT_SYMBOL(devm_clk_get);
31 31
32struct clk *devm_clk_get_optional(struct device *dev, const char *id)
33{
34 struct clk *clk = devm_clk_get(dev, id);
35
36 if (clk == ERR_PTR(-ENOENT))
37 return NULL;
38
39 return clk;
40}
41EXPORT_SYMBOL(devm_clk_get_optional);
42
32struct clk_bulk_devres { 43struct clk_bulk_devres {
33 struct clk_bulk_data *clks; 44 struct clk_bulk_data *clks;
34 int num_clks; 45 int num_clks;
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
new file mode 100644
index 000000000000..d1a97d971183
--- /dev/null
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -0,0 +1,101 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Memory Mapped IO Fixed clock driver
5 *
6 * Copyright (C) 2018 Cadence Design Systems, Inc.
7 *
8 * Authors:
9 * Jan Kotas <jank@cadence.com>
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/of_address.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16
17static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
18{
19 struct clk_hw *clk;
20 const char *clk_name = node->name;
21 void __iomem *base;
22 u32 freq;
23 int ret;
24
25 base = of_iomap(node, 0);
26 if (!base) {
27 pr_err("%pOFn: failed to map address\n", node);
28 return ERR_PTR(-EIO);
29 }
30
31 freq = readl(base);
32 iounmap(base);
33 of_property_read_string(node, "clock-output-names", &clk_name);
34
35 clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL, 0, freq);
36 if (IS_ERR(clk)) {
37 pr_err("%pOFn: failed to register fixed rate clock\n", node);
38 return clk;
39 }
40
41 ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, clk);
42 if (ret) {
43 pr_err("%pOFn: failed to add clock provider\n", node);
44 clk_hw_unregister(clk);
45 clk = ERR_PTR(ret);
46 }
47
48 return clk;
49}
50
51static void __init of_fixed_mmio_clk_setup(struct device_node *node)
52{
53 fixed_mmio_clk_setup(node);
54}
55CLK_OF_DECLARE(fixed_mmio_clk, "fixed-mmio-clock", of_fixed_mmio_clk_setup);
56
57/**
58 * This is not executed when of_fixed_mmio_clk_setup succeeded.
59 */
60static int of_fixed_mmio_clk_probe(struct platform_device *pdev)
61{
62 struct clk_hw *clk;
63
64 clk = fixed_mmio_clk_setup(pdev->dev.of_node);
65 if (IS_ERR(clk))
66 return PTR_ERR(clk);
67
68 platform_set_drvdata(pdev, clk);
69
70 return 0;
71}
72
73static int of_fixed_mmio_clk_remove(struct platform_device *pdev)
74{
75 struct clk_hw *clk = platform_get_drvdata(pdev);
76
77 of_clk_del_provider(pdev->dev.of_node);
78 clk_hw_unregister_fixed_rate(clk);
79
80 return 0;
81}
82
83static const struct of_device_id of_fixed_mmio_clk_ids[] = {
84 { .compatible = "fixed-mmio-clock" },
85 { }
86};
87MODULE_DEVICE_TABLE(of, of_fixed_mmio_clk_ids);
88
89static struct platform_driver of_fixed_mmio_clk_driver = {
90 .driver = {
91 .name = "of_fixed_mmio_clk",
92 .of_match_table = of_fixed_mmio_clk_ids,
93 },
94 .probe = of_fixed_mmio_clk_probe,
95 .remove = of_fixed_mmio_clk_remove,
96};
97module_platform_driver(of_fixed_mmio_clk_driver);
98
99MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
100MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
101MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 545dceec0bbf..fdfe2e423d15 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -79,7 +79,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
79 unsigned long m, n; 79 unsigned long m, n;
80 u64 ret; 80 u64 ret;
81 81
82 if (!rate || rate >= *parent_rate) 82 if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
83 return *parent_rate; 83 return *parent_rate;
84 84
85 if (fd->approximation) 85 if (fd->approximation)
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 25eed3e0251f..c2f07f0d077c 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -58,6 +58,35 @@ const struct clk_ops clk_gpio_gate_ops = {
58}; 58};
59EXPORT_SYMBOL_GPL(clk_gpio_gate_ops); 59EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
60 60
61static int clk_sleeping_gpio_gate_prepare(struct clk_hw *hw)
62{
63 struct clk_gpio *clk = to_clk_gpio(hw);
64
65 gpiod_set_value_cansleep(clk->gpiod, 1);
66
67 return 0;
68}
69
70static void clk_sleeping_gpio_gate_unprepare(struct clk_hw *hw)
71{
72 struct clk_gpio *clk = to_clk_gpio(hw);
73
74 gpiod_set_value_cansleep(clk->gpiod, 0);
75}
76
77static int clk_sleeping_gpio_gate_is_prepared(struct clk_hw *hw)
78{
79 struct clk_gpio *clk = to_clk_gpio(hw);
80
81 return gpiod_get_value_cansleep(clk->gpiod);
82}
83
84static const struct clk_ops clk_sleeping_gpio_gate_ops = {
85 .prepare = clk_sleeping_gpio_gate_prepare,
86 .unprepare = clk_sleeping_gpio_gate_unprepare,
87 .is_prepared = clk_sleeping_gpio_gate_is_prepared,
88};
89
61/** 90/**
62 * DOC: basic clock multiplexer which can be controlled with a gpio output 91 * DOC: basic clock multiplexer which can be controlled with a gpio output
63 * Traits of this clock: 92 * Traits of this clock:
@@ -144,10 +173,16 @@ struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
144 const char *parent_name, struct gpio_desc *gpiod, 173 const char *parent_name, struct gpio_desc *gpiod,
145 unsigned long flags) 174 unsigned long flags)
146{ 175{
176 const struct clk_ops *ops;
177
178 if (gpiod_cansleep(gpiod))
179 ops = &clk_sleeping_gpio_gate_ops;
180 else
181 ops = &clk_gpio_gate_ops;
182
147 return clk_register_gpio(dev, name, 183 return clk_register_gpio(dev, name,
148 (parent_name ? &parent_name : NULL), 184 (parent_name ? &parent_name : NULL),
149 (parent_name ? 1 : 0), gpiod, flags, 185 (parent_name ? 1 : 0), gpiod, flags, ops);
150 &clk_gpio_gate_ops);
151} 186}
152EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate); 187EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
153 188
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 727ed8e1bb72..8e4581004695 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -293,6 +293,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
293 /* Map system registers */ 293 /* Map system registers */
294 srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs"); 294 srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
295 hb_clk->reg = of_iomap(srnp, 0); 295 hb_clk->reg = of_iomap(srnp, 0);
296 of_node_put(srnp);
296 BUG_ON(!hb_clk->reg); 297 BUG_ON(!hb_clk->reg);
297 hb_clk->reg += reg; 298 hb_clk->reg += reg;
298 299
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 22c937644c93..3727d5472450 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -235,8 +235,9 @@ static int max77686_clk_probe(struct platform_device *pdev)
235 return ret; 235 return ret;
236 } 236 }
237 237
238 ret = clk_hw_register_clkdev(&max_clk_data->hw, 238 ret = devm_clk_hw_register_clkdev(dev, &max_clk_data->hw,
239 max_clk_data->clk_idata.name, NULL); 239 max_clk_data->clk_idata.name,
240 NULL);
240 if (ret < 0) { 241 if (ret < 0) {
241 dev_err(dev, "Failed to clkdev register: %d\n", ret); 242 dev_err(dev, "Failed to clkdev register: %d\n", ret);
242 return ret; 243 return ret;
@@ -244,8 +245,8 @@ static int max77686_clk_probe(struct platform_device *pdev)
244 } 245 }
245 246
246 if (parent->of_node) { 247 if (parent->of_node) {
247 ret = of_clk_add_hw_provider(parent->of_node, of_clk_max77686_get, 248 ret = devm_of_clk_add_hw_provider(dev, of_clk_max77686_get,
248 drv_data); 249 drv_data);
249 250
250 if (ret < 0) { 251 if (ret < 0) {
251 dev_err(dev, "Failed to register OF clock provider: %d\n", 252 dev_err(dev, "Failed to register OF clock provider: %d\n",
@@ -261,27 +262,11 @@ static int max77686_clk_probe(struct platform_device *pdev)
261 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT); 262 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT);
262 if (ret < 0) { 263 if (ret < 0) {
263 dev_err(dev, "Failed to config low-jitter: %d\n", ret); 264 dev_err(dev, "Failed to config low-jitter: %d\n", ret);
264 goto remove_of_clk_provider; 265 return ret;
265 } 266 }
266 } 267 }
267 268
268 return 0; 269 return 0;
269
270remove_of_clk_provider:
271 if (parent->of_node)
272 of_clk_del_provider(parent->of_node);
273
274 return ret;
275}
276
277static int max77686_clk_remove(struct platform_device *pdev)
278{
279 struct device *parent = pdev->dev.parent;
280
281 if (parent->of_node)
282 of_clk_del_provider(parent->of_node);
283
284 return 0;
285} 270}
286 271
287static const struct platform_device_id max77686_clk_id[] = { 272static const struct platform_device_id max77686_clk_id[] = {
@@ -297,7 +282,6 @@ static struct platform_driver max77686_clk_driver = {
297 .name = "max77686-clk", 282 .name = "max77686-clk",
298 }, 283 },
299 .probe = max77686_clk_probe, 284 .probe = max77686_clk_probe,
300 .remove = max77686_clk_remove,
301 .id_table = max77686_clk_id, 285 .id_table = max77686_clk_id,
302}; 286};
303 287
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 5baa9e051110..1212a9be7e80 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1148,8 +1148,8 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
1148 pll->div[i].clk = clk; 1148 pll->div[i].clk = clk;
1149 ret = clk_register_clkdev(clk, pll->div[i].name, NULL); 1149 ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
1150 if (ret != 0) 1150 if (ret != 0)
1151 pr_err("%s: %s: register to lookup table failed %ld\n", 1151 pr_err("%s: %s: register to lookup table failed %d\n",
1152 __func__, pll->div[i].name, PTR_ERR(clk)); 1152 __func__, pll->div[i].name, ret);
1153 1153
1154 } 1154 }
1155} 1155}
@@ -1389,6 +1389,7 @@ static void __init clockgen_init(struct device_node *np)
1389 pr_err("%s: Couldn't map %pOF regs\n", __func__, 1389 pr_err("%s: Couldn't map %pOF regs\n", __func__,
1390 guts); 1390 guts);
1391 } 1391 }
1392 of_node_put(guts);
1392 } 1393 }
1393 1394
1394 } 1395 }
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 6a31f7f434ce..a0ae8dc16909 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -121,7 +121,7 @@ static const char * const cpu_src[] = {
121}; 121};
122 122
123static const char * const axi_src[] = { 123static const char * const axi_src[] = {
124 "ck_hsi", "ck_hse", "pll2_p", "pll3_p" 124 "ck_hsi", "ck_hse", "pll2_p"
125}; 125};
126 126
127static const char * const per_src[] = { 127static const char * const per_src[] = {
@@ -225,19 +225,19 @@ static const char * const usart6_src[] = {
225}; 225};
226 226
227static const char * const fdcan_src[] = { 227static const char * const fdcan_src[] = {
228 "ck_hse", "pll3_q", "pll4_q" 228 "ck_hse", "pll3_q", "pll4_q", "pll4_r"
229}; 229};
230 230
231static const char * const sai_src[] = { 231static const char * const sai_src[] = {
232 "pll4_q", "pll3_q", "i2s_ckin", "ck_per" 232 "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
233}; 233};
234 234
235static const char * const sai2_src[] = { 235static const char * const sai2_src[] = {
236 "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb" 236 "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r"
237}; 237};
238 238
239static const char * const adc12_src[] = { 239static const char * const adc12_src[] = {
240 "pll4_q", "ck_per" 240 "pll4_r", "ck_per", "pll3_q"
241}; 241};
242 242
243static const char * const dsi_src[] = { 243static const char * const dsi_src[] = {
@@ -269,7 +269,7 @@ static const struct clk_div_table axi_div_table[] = {
269static const struct clk_div_table mcu_div_table[] = { 269static const struct clk_div_table mcu_div_table[] = {
270 { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 }, 270 { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
271 { 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 }, 271 { 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 },
272 { 8, 512 }, { 9, 512 }, { 10, 512}, { 11, 512 }, 272 { 8, 256 }, { 9, 512 }, { 10, 512}, { 11, 512 },
273 { 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 }, 273 { 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 },
274 { 0 }, 274 { 0 },
275}; 275};
@@ -1286,10 +1286,11 @@ _clk_stm32_register_composite(struct device *dev,
1286 MGATE_MP1(_id, _name, _parent, _flags, _mgate) 1286 MGATE_MP1(_id, _name, _parent, _flags, _mgate)
1287 1287
1288#define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\ 1288#define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
1289 COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE | _flags,\ 1289 COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\
1290 _MGATE_MP1(_mgate),\ 1290 CLK_SET_RATE_NO_REPARENT | _flags,\
1291 _MMUX(_mmux),\ 1291 _MGATE_MP1(_mgate),\
1292 _NO_DIV) 1292 _MMUX(_mmux),\
1293 _NO_DIV)
1293 1294
1294enum { 1295enum {
1295 G_SAI1, 1296 G_SAI1,
@@ -1655,12 +1656,14 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
1655 1656
1656static const struct clock_config stm32mp1_clock_cfg[] = { 1657static const struct clock_config stm32mp1_clock_cfg[] = {
1657 /* Oscillator divider */ 1658 /* Oscillator divider */
1658 DIV(NO_ID, "clk-hsi-div", "clk-hsi", 0, RCC_HSICFGR, 0, 2, 1659 DIV(NO_ID, "clk-hsi-div", "clk-hsi", CLK_DIVIDER_POWER_OF_TWO,
1659 CLK_DIVIDER_READ_ONLY), 1660 RCC_HSICFGR, 0, 2, CLK_DIVIDER_READ_ONLY),
1660 1661
1661 /* External / Internal Oscillators */ 1662 /* External / Internal Oscillators */
1662 GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0), 1663 GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
1663 GATE_MP1(CK_CSI, "ck_csi", "clk-csi", 0, RCC_OCENSETR, 4, 0), 1664 /* ck_csi is used by IO compensation and should be critical */
1665 GATE_MP1(CK_CSI, "ck_csi", "clk-csi", CLK_IS_CRITICAL,
1666 RCC_OCENSETR, 4, 0),
1664 GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0), 1667 GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
1665 GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0), 1668 GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
1666 GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0), 1669 GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
@@ -1952,14 +1955,14 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
1952 MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU), 1955 MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
1953 MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12), 1956 MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
1954 1957
1955 COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE, 1958 COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE |
1959 CLK_SET_RATE_NO_REPARENT,
1956 _NO_GATE, 1960 _NO_GATE,
1957 _MMUX(M_ETHCK), 1961 _MMUX(M_ETHCK),
1958 _DIV(RCC_ETHCKSELR, 4, 4, CLK_DIVIDER_ALLOW_ZERO, NULL)), 1962 _DIV(RCC_ETHCKSELR, 4, 4, 0, NULL)),
1959 1963
1960 /* RTC clock */ 1964 /* RTC clock */
1961 DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7, 1965 DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 6, 0),
1962 CLK_DIVIDER_ALLOW_ZERO),
1963 1966
1964 COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE | 1967 COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE |
1965 CLK_SET_RATE_PARENT, 1968 CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index ea846f77750b..0cad5748bf0e 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
41 return pdmclk->enabled; 41 return pdmclk->enabled;
42} 42}
43 43
44static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
45 unsigned int reg)
46{
47 const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
48 int ret;
49
50 ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
51 if (ret < 0)
52 return ret;
53
54 ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
55 if (ret < 0)
56 return ret;
57
58 return 0;
59}
60
61/*
62 * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
63 * Cold Temperature". This affects cold boot and deeper idle states it
64 * seems. The workaround consists of resetting HPPLL and LPPLL.
65 */
66static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
67{
68 int ret;
69
70 ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
71 if (ret)
72 return ret;
73
74 ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
75 if (ret)
76 return ret;
77
78 return 0;
79}
80
44static int twl6040_pdmclk_prepare(struct clk_hw *hw) 81static int twl6040_pdmclk_prepare(struct clk_hw *hw)
45{ 82{
46 struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk, 83 struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
@@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw)
48 int ret; 85 int ret;
49 86
50 ret = twl6040_power(pdmclk->twl6040, 1); 87 ret = twl6040_power(pdmclk->twl6040, 1);
51 if (!ret) 88 if (ret)
52 pdmclk->enabled = 1; 89 return ret;
90
91 ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
92 if (ret)
93 goto out_err;
94
95 pdmclk->enabled = 1;
96
97 return 0;
98
99out_err:
100 dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
101 twl6040_power(pdmclk->twl6040, 0);
53 102
54 return ret; 103 return ret;
55} 104}
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 5b393e711e94..7d16ab0784ec 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
262 262
263 if (vc5->clk_mux_ins == VC5_MUX_IN_XIN) 263 if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
264 src = VC5_PRIM_SRC_SHDN_EN_XTAL; 264 src = VC5_PRIM_SRC_SHDN_EN_XTAL;
265 if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) 265 else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
266 src = VC5_PRIM_SRC_SHDN_EN_CLKIN; 266 src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
267 else /* Invalid; should have been caught by vc5_probe() */
268 return -EINVAL;
267 } 269 }
268 270
269 return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src); 271 return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 75d13c0eff12..af3882f04080 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -394,16 +394,19 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
394{ 394{
395 return clk_core_is_prepared(hw->core); 395 return clk_core_is_prepared(hw->core);
396} 396}
397EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
397 398
398bool clk_hw_rate_is_protected(const struct clk_hw *hw) 399bool clk_hw_rate_is_protected(const struct clk_hw *hw)
399{ 400{
400 return clk_core_rate_is_protected(hw->core); 401 return clk_core_rate_is_protected(hw->core);
401} 402}
403EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
402 404
403bool clk_hw_is_enabled(const struct clk_hw *hw) 405bool clk_hw_is_enabled(const struct clk_hw *hw)
404{ 406{
405 return clk_core_is_enabled(hw->core); 407 return clk_core_is_enabled(hw->core);
406} 408}
409EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
407 410
408bool __clk_is_enabled(struct clk *clk) 411bool __clk_is_enabled(struct clk *clk)
409{ 412{
@@ -1513,9 +1516,19 @@ static int clk_fetch_parent_index(struct clk_core *core,
1513 if (!parent) 1516 if (!parent)
1514 return -EINVAL; 1517 return -EINVAL;
1515 1518
1516 for (i = 0; i < core->num_parents; i++) 1519 for (i = 0; i < core->num_parents; i++) {
1517 if (clk_core_get_parent_by_index(core, i) == parent) 1520 if (core->parents[i] == parent)
1521 return i;
1522
1523 if (core->parents[i])
1524 continue;
1525
1526 /* Fallback to comparing globally unique names */
1527 if (!strcmp(parent->name, core->parent_names[i])) {
1528 core->parents[i] = parent;
1518 return i; 1529 return i;
1530 }
1531 }
1519 1532
1520 return -EINVAL; 1533 return -EINVAL;
1521} 1534}
@@ -2779,7 +2792,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2779 seq_printf(s, "\"protect_count\": %d,", c->protect_count); 2792 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
2780 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); 2793 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2781 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); 2794 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
2782 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); 2795 seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
2783 seq_printf(s, "\"duty_cycle\": %u", 2796 seq_printf(s, "\"duty_cycle\": %u",
2784 clk_core_get_scaled_duty_cycle(c, 100000)); 2797 clk_core_get_scaled_duty_cycle(c, 100000));
2785} 2798}
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 9ab3db8b3988..4cfe39636105 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -52,6 +52,12 @@ struct clk *of_clk_get(struct device_node *np, int index)
52} 52}
53EXPORT_SYMBOL(of_clk_get); 53EXPORT_SYMBOL(of_clk_get);
54 54
55/*
56 * Beware the return values when np is valid, but no clock provider is found.
57 * If name == NULL, the function returns -ENOENT.
58 * If name != NULL, the function returns -EINVAL. This is because __of_clk_get()
59 * is called even if of_property_match_string() returns an error.
60 */
55static struct clk *__of_clk_get_by_name(struct device_node *np, 61static struct clk *__of_clk_get_by_name(struct device_node *np,
56 const char *dev_id, 62 const char *dev_id,
57 const char *name) 63 const char *name)
@@ -401,6 +407,23 @@ static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
401 return cl; 407 return cl;
402} 408}
403 409
410static int do_clk_register_clkdev(struct clk_hw *hw,
411 struct clk_lookup **cl, const char *con_id, const char *dev_id)
412{
413 if (IS_ERR(hw))
414 return PTR_ERR(hw);
415 /*
416 * Since dev_id can be NULL, and NULL is handled specially, we must
417 * pass it as either a NULL format string, or with "%s".
418 */
419 if (dev_id)
420 *cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
421 else
422 *cl = __clk_register_clkdev(hw, con_id, NULL);
423
424 return *cl ? 0 : -ENOMEM;
425}
426
404/** 427/**
405 * clk_register_clkdev - register one clock lookup for a struct clk 428 * clk_register_clkdev - register one clock lookup for a struct clk
406 * @clk: struct clk to associate with all clk_lookups 429 * @clk: struct clk to associate with all clk_lookups
@@ -423,17 +446,8 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
423 if (IS_ERR(clk)) 446 if (IS_ERR(clk))
424 return PTR_ERR(clk); 447 return PTR_ERR(clk);
425 448
426 /* 449 return do_clk_register_clkdev(__clk_get_hw(clk), &cl, con_id,
427 * Since dev_id can be NULL, and NULL is handled specially, we must 450 dev_id);
428 * pass it as either a NULL format string, or with "%s".
429 */
430 if (dev_id)
431 cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, "%s",
432 dev_id);
433 else
434 cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, NULL);
435
436 return cl ? 0 : -ENOMEM;
437} 451}
438EXPORT_SYMBOL(clk_register_clkdev); 452EXPORT_SYMBOL(clk_register_clkdev);
439 453
@@ -456,18 +470,75 @@ int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
456{ 470{
457 struct clk_lookup *cl; 471 struct clk_lookup *cl;
458 472
459 if (IS_ERR(hw)) 473 return do_clk_register_clkdev(hw, &cl, con_id, dev_id);
460 return PTR_ERR(hw); 474}
475EXPORT_SYMBOL(clk_hw_register_clkdev);
461 476
462 /* 477static void devm_clkdev_release(struct device *dev, void *res)
463 * Since dev_id can be NULL, and NULL is handled specially, we must 478{
464 * pass it as either a NULL format string, or with "%s". 479 clkdev_drop(*(struct clk_lookup **)res);
465 */ 480}
466 if (dev_id) 481
467 cl = __clk_register_clkdev(hw, con_id, "%s", dev_id); 482static int devm_clk_match_clkdev(struct device *dev, void *res, void *data)
468 else 483{
469 cl = __clk_register_clkdev(hw, con_id, NULL); 484 struct clk_lookup **l = res;
470 485
471 return cl ? 0 : -ENOMEM; 486 return *l == data;
472} 487}
473EXPORT_SYMBOL(clk_hw_register_clkdev); 488
489/**
490 * devm_clk_release_clkdev - Resource managed clkdev lookup release
491 * @dev: device this lookup is bound
492 * @con_id: connection ID string on device
493 * @dev_id: format string describing device name
494 *
495 * Drop the clkdev lookup created with devm_clk_hw_register_clkdev.
496 * Normally this function will not need to be called and the resource
497 * management code will ensure that the resource is freed.
498 */
499void devm_clk_release_clkdev(struct device *dev, const char *con_id,
500 const char *dev_id)
501{
502 struct clk_lookup *cl;
503 int rval;
504
505 cl = clk_find(dev_id, con_id);
506 WARN_ON(!cl);
507 rval = devres_release(dev, devm_clkdev_release,
508 devm_clk_match_clkdev, cl);
509 WARN_ON(rval);
510}
511EXPORT_SYMBOL(devm_clk_release_clkdev);
512
513/**
514 * devm_clk_hw_register_clkdev - managed clk lookup registration for clk_hw
515 * @dev: device this lookup is bound
516 * @hw: struct clk_hw to associate with all clk_lookups
517 * @con_id: connection ID string on device
518 * @dev_id: format string describing device name
519 *
520 * con_id or dev_id may be NULL as a wildcard, just as in the rest of
521 * clkdev.
522 *
523 * To make things easier for mass registration, we detect error clk_hws
524 * from a previous clk_hw_register_*() call, and return the error code for
525 * those. This is to permit this function to be called immediately
526 * after clk_hw_register_*().
527 */
528int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
529 const char *con_id, const char *dev_id)
530{
531 int rval = -ENOMEM;
532 struct clk_lookup **cl;
533
534 cl = devres_alloc(devm_clkdev_release, sizeof(*cl), GFP_KERNEL);
535 if (cl) {
536 rval = do_clk_register_clkdev(hw, cl, con_id, dev_id);
537 if (!rval)
538 devres_add(dev, cl);
539 else
540 devres_free(cl);
541 }
542 return rval;
543}
544EXPORT_SYMBOL(devm_clk_hw_register_clkdev);
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 4aae31a23449..0eaf41848280 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -8,6 +8,12 @@ config MXC_CLK_SCU
8 bool 8 bool
9 depends on IMX_SCU 9 depends on IMX_SCU
10 10
11config CLK_IMX8MM
12 bool "IMX8MM CCM Clock Driver"
13 depends on ARCH_MXC && ARM64
14 help
15 Build the driver for i.MX8MM CCM Clock Driver
16
11config CLK_IMX8MQ 17config CLK_IMX8MQ
12 bool "IMX8MQ CCM Clock Driver" 18 bool "IMX8MQ CCM Clock Driver"
13 depends on ARCH_MXC && ARM64 19 depends on ARCH_MXC && ARM64
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 73119fbfa547..0d5180fbe988 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -18,12 +18,14 @@ obj-$(CONFIG_MXC_CLK) += \
18 clk-pllv2.o \ 18 clk-pllv2.o \
19 clk-pllv3.o \ 19 clk-pllv3.o \
20 clk-pllv4.o \ 20 clk-pllv4.o \
21 clk-sccg-pll.o 21 clk-sccg-pll.o \
22 clk-pll14xx.o
22 23
23obj-$(CONFIG_MXC_CLK_SCU) += \ 24obj-$(CONFIG_MXC_CLK_SCU) += \
24 clk-scu.o \ 25 clk-scu.o \
25 clk-lpcg-scu.o 26 clk-lpcg-scu.o
26 27
28obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o
27obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o 29obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
28obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o 30obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
29 31
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 527ade1d6933..574fac1a169f 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -123,7 +123,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
123}; 123};
124 124
125struct clk *imx8m_clk_composite_flags(const char *name, 125struct clk *imx8m_clk_composite_flags(const char *name,
126 const char **parent_names, 126 const char * const *parent_names,
127 int num_parents, void __iomem *reg, 127 int num_parents, void __iomem *reg,
128 unsigned long flags) 128 unsigned long flags)
129{ 129{
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 0026c3969b1e..76b9eb15604e 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
155{ 155{
156 struct clk_frac_pll *pll = to_clk_frac_pll(hw); 156 struct clk_frac_pll *pll = to_clk_frac_pll(hw);
157 u32 val, divfi, divff; 157 u32 val, divfi, divff;
158 u64 temp64 = parent_rate; 158 u64 temp64;
159 int ret; 159 int ret;
160 160
161 parent_rate *= 8; 161 parent_rate *= 8;
162 rate *= 2; 162 rate *= 2;
163 divfi = rate / parent_rate; 163 divfi = rate / parent_rate;
164 temp64 *= rate - divfi; 164 temp64 = parent_rate * divfi;
165 temp64 = rate - temp64;
165 temp64 *= PLL_FRAC_DENOM; 166 temp64 *= PLL_FRAC_DENOM;
166 do_div(temp64, parent_rate); 167 do_div(temp64, parent_rate);
167 divff = temp64; 168 divff = temp64;
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index fc8e782d817b..e91c826bce70 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -428,6 +428,7 @@ static void __init mx51_clocks_init(struct device_node *np)
428 clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14); 428 clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
429 clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0); 429 clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
430 clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22); 430 clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
431 clk[IMX5_CLK_SCC2_IPG_GATE] = imx_clk_gate2("scc2_gate", "ipg", MXC_CCM_CCGR1, 30);
431 clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2_flags("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6, CLK_IS_CRITICAL); 432 clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2_flags("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6, CLK_IS_CRITICAL);
432 clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2_flags("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8, CLK_IS_CRITICAL); 433 clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2_flags("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8, CLK_IS_CRITICAL);
433 clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2_flags("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10, CLK_IS_CRITICAL); 434 clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2_flags("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10, CLK_IS_CRITICAL);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 716eac3136b4..708e7c5590dd 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -471,6 +471,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
471 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop"); 471 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
472 anatop_base = base = of_iomap(np, 0); 472 anatop_base = base = of_iomap(np, 0);
473 WARN_ON(!base); 473 WARN_ON(!base);
474 of_node_put(np);
474 475
475 /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */ 476 /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
476 if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) { 477 if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 18527a335ace..91558b09bf9e 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -151,6 +151,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
151 np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop"); 151 np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
152 base = of_iomap(np, 0); 152 base = of_iomap(np, 0);
153 WARN_ON(!base); 153 WARN_ON(!base);
154 of_node_put(np);
154 155
155 clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); 156 clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
156 clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); 157 clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 06c105d580a4..cfbd8d4edb85 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -404,6 +404,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
404 np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop"); 404 np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
405 base = of_iomap(np, 0); 405 base = of_iomap(np, 0);
406 WARN_ON(!base); 406 WARN_ON(!base);
407 of_node_put(np);
407 408
408 clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); 409 clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
409 clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); 410 clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 4e18f629f823..ce306631e844 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -48,8 +48,8 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
48 struct clk_hw **clks; 48 struct clk_hw **clks;
49 void __iomem *base; 49 void __iomem *base;
50 50
51 clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * 51 clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SCG1_END),
52 IMX7ULP_CLK_SCG1_END, GFP_KERNEL); 52 GFP_KERNEL);
53 if (!clk_data) 53 if (!clk_data)
54 return; 54 return;
55 55
@@ -136,8 +136,8 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
136 struct clk_hw **clks; 136 struct clk_hw **clks;
137 void __iomem *base; 137 void __iomem *base;
138 138
139 clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * 139 clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
140 IMX7ULP_CLK_PCC2_END, GFP_KERNEL); 140 GFP_KERNEL);
141 if (!clk_data) 141 if (!clk_data)
142 return; 142 return;
143 143
@@ -183,8 +183,8 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
183 struct clk_hw **clks; 183 struct clk_hw **clks;
184 void __iomem *base; 184 void __iomem *base;
185 185
186 clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * 186 clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
187 IMX7ULP_CLK_PCC3_END, GFP_KERNEL); 187 GFP_KERNEL);
188 if (!clk_data) 188 if (!clk_data)
189 return; 189 return;
190 190
@@ -228,8 +228,8 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
228 struct clk_hw **clks; 228 struct clk_hw **clks;
229 void __iomem *base; 229 void __iomem *base;
230 230
231 clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * 231 clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SMC1_END),
232 IMX7ULP_CLK_SMC1_END, GFP_KERNEL); 232 GFP_KERNEL);
233 if (!clk_data) 233 if (!clk_data)
234 return; 234 return;
235 235
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
new file mode 100644
index 000000000000..1ef8438e3d6d
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -0,0 +1,675 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright 2017-2018 NXP.
4 */
5
6#include <dt-bindings/clock/imx8mm-clock.h>
7#include <linux/clk.h>
8#include <linux/err.h>
9#include <linux/init.h>
10#include <linux/io.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/of_address.h>
14#include <linux/platform_device.h>
15#include <linux/types.h>
16
17#include "clk.h"
18
19static u32 share_count_sai1;
20static u32 share_count_sai2;
21static u32 share_count_sai3;
22static u32 share_count_sai4;
23static u32 share_count_sai5;
24static u32 share_count_sai6;
25static u32 share_count_dcss;
26static u32 share_count_pdm;
27static u32 share_count_nand;
28
29#define PLL_1416X_RATE(_rate, _m, _p, _s) \
30 { \
31 .rate = (_rate), \
32 .mdiv = (_m), \
33 .pdiv = (_p), \
34 .sdiv = (_s), \
35 }
36
37#define PLL_1443X_RATE(_rate, _m, _p, _s, _k) \
38 { \
39 .rate = (_rate), \
40 .mdiv = (_m), \
41 .pdiv = (_p), \
42 .sdiv = (_s), \
43 .kdiv = (_k), \
44 }
45
46static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = {
47 PLL_1416X_RATE(1800000000U, 225, 3, 0),
48 PLL_1416X_RATE(1600000000U, 200, 3, 0),
49 PLL_1416X_RATE(1200000000U, 300, 3, 1),
50 PLL_1416X_RATE(1000000000U, 250, 3, 1),
51 PLL_1416X_RATE(800000000U, 200, 3, 1),
52 PLL_1416X_RATE(750000000U, 250, 2, 2),
53 PLL_1416X_RATE(700000000U, 350, 3, 2),
54 PLL_1416X_RATE(600000000U, 300, 3, 2),
55};
56
57static const struct imx_pll14xx_rate_table imx8mm_audiopll_tbl[] = {
58 PLL_1443X_RATE(786432000U, 655, 5, 2, 23593),
59 PLL_1443X_RATE(722534400U, 301, 5, 1, 3670),
60};
61
62static const struct imx_pll14xx_rate_table imx8mm_videopll_tbl[] = {
63 PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
64 PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
65};
66
67static const struct imx_pll14xx_rate_table imx8mm_drampll_tbl[] = {
68 PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
69};
70
71static struct imx_pll14xx_clk imx8mm_audio_pll __initdata = {
72 .type = PLL_1443X,
73 .rate_table = imx8mm_audiopll_tbl,
74 .rate_count = ARRAY_SIZE(imx8mm_audiopll_tbl),
75};
76
77static struct imx_pll14xx_clk imx8mm_video_pll __initdata = {
78 .type = PLL_1443X,
79 .rate_table = imx8mm_videopll_tbl,
80 .rate_count = ARRAY_SIZE(imx8mm_videopll_tbl),
81};
82
83static struct imx_pll14xx_clk imx8mm_dram_pll __initdata = {
84 .type = PLL_1443X,
85 .rate_table = imx8mm_drampll_tbl,
86 .rate_count = ARRAY_SIZE(imx8mm_drampll_tbl),
87};
88
89static struct imx_pll14xx_clk imx8mm_arm_pll __initdata = {
90 .type = PLL_1416X,
91 .rate_table = imx8mm_pll1416x_tbl,
92 .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
93};
94
95static struct imx_pll14xx_clk imx8mm_gpu_pll __initdata = {
96 .type = PLL_1416X,
97 .rate_table = imx8mm_pll1416x_tbl,
98 .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
99};
100
101static struct imx_pll14xx_clk imx8mm_vpu_pll __initdata = {
102 .type = PLL_1416X,
103 .rate_table = imx8mm_pll1416x_tbl,
104 .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
105};
106
107static struct imx_pll14xx_clk imx8mm_sys_pll __initdata = {
108 .type = PLL_1416X,
109 .rate_table = imx8mm_pll1416x_tbl,
110 .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
111};
112
113static const char *pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
114static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
115static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
116static const char *video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
117static const char *dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
118static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
119static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
120static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
121static const char *sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
122static const char *sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
123static const char *sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
124
125/* CCM ROOT */
126static const char *imx8mm_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m",
127 "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out", };
128
129static const char *imx8mm_m4_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "sys_pll1_266m",
130 "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
131
132static const char *imx8mm_vpu_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m",
133 "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "vpu_pll_out", };
134
135static const char *imx8mm_gpu3d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out",
136 "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
137
138static const char *imx8mm_gpu2d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out",
139 "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
140
141static const char *imx8mm_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m", "sys_pll2_250m",
142 "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "sys_pll1_100m",};
143
144static const char *imx8mm_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_250m",
145 "sys_pll2_200m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
146
147static const char *imx8mm_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_200m",
148 "sys_pll1_133m", "sys_pll3_out", "sys_pll2_250m", "audio_pll1_out", };
149
150static const char *imx8mm_vpu_bus_sels[] = {"osc_24m", "sys_pll1_800m", "vpu_pll_out", "audio_pll2_out",
151 "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_200m", "sys_pll1_100m", };
152
153static const char *imx8mm_disp_axi_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m", "sys_pll3_out",
154 "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "clk_ext4", };
155
156static const char *imx8mm_disp_apb_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll1_800m", "sys_pll3_out",
157 "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", };
158
159static const char *imx8mm_disp_rtrm_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll2_200m", "sys_pll2_1000m",
160 "audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", };
161
162static const char *imx8mm_usb_bus_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_100m",
163 "sys_pll2_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", };
164
165static const char *imx8mm_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m",
166 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
167
168static const char *imx8mm_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m",
169 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
170
171static const char *imx8mm_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_500m",
172 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
173
174static const char *imx8mm_noc_apb_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll3_out", "sys_pll2_333m", "sys_pll2_200m",
175 "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", };
176
177static const char *imx8mm_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m", "sys_pll1_400m",
178 "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", };
179
180static const char *imx8mm_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_1000m",
181 "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", };
182
183static const char *imx8mm_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m", "sys_pll2_500m",
184 "sys_pll2_1000m", "sys_pll3_out", "audio_pll1_out", "sys_pll1_266m", };
185
186static const char *imx8mm_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
187 "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
188
189static const char *imx8mm_vpu_g1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
190 "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", };
191
192static const char *imx8mm_vpu_g2_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
193 "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", };
194
195static const char *imx8mm_disp_dtrc_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m",
196 "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", };
197
198static const char *imx8mm_disp_dc8000_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m",
199 "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", };
200
201static const char *imx8mm_pcie1_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m", "sys_pll1_266m",
202 "sys_pll1_800m", "sys_pll2_500m", "sys_pll2_333m", "sys_pll3_out", };
203
204static const char *imx8mm_pcie1_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m", "clk_ext1", "clk_ext2",
205 "clk_ext3", "clk_ext4", "sys_pll1_400m", };
206
207static const char *imx8mm_pcie1_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out",
208 "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m", };
209
210static const char *imx8mm_dc_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out",
211 "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
212
213static const char *imx8mm_lcdif_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out",
214 "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
215
216static const char *imx8mm_sai1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
217 "sys_pll1_133m", "osc_hdmi", "clk_ext1", "clk_ext2", };
218
219static const char *imx8mm_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
220 "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", };
221
222static const char *imx8mm_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
223 "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", };
224
225static const char *imx8mm_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
226 "sys_pll1_133m", "osc_hdmi", "clk_ext1", "clk_ext2", };
227
228static const char *imx8mm_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
229 "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", };
230
231static const char *imx8mm_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
232 "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", };
233
234static const char *imx8mm_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
235 "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", };
236
237static const char *imx8mm_spdif2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
238 "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", };
239
240static const char *imx8mm_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m", "sys_pll2_100m",
241 "sys_pll1_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", };
242
243static const char *imx8mm_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
244 "clk_ext3", "clk_ext4", "video_pll1_out", };
245
246static const char *imx8mm_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m", "sys_pll2_200m",
247 "sys_pll2_500m", "video_pll1_out", "audio_pll2_out", };
248
249static const char *imx8mm_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out", "sys_pll1_400m",
250 "audio_pll2_out", "sys_pll3_out", "sys_pll2_250m", "video_pll1_out", };
251
252static const char *imx8mm_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
253 "audio_pll2_out", "sys_pll1_266m", "sys_pll3_out", "sys_pll1_100m", };
254
255static const char *imx8mm_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
256 "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };
257
258static const char *imx8mm_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
259 "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };
260
261static const char *imx8mm_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
262 "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
263
264static const char *imx8mm_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
265 "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
266
267static const char *imx8mm_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
268 "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
269
270static const char *imx8mm_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
271 "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
272
273static const char *imx8mm_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
274 "sys_pll3_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
275
276static const char *imx8mm_uart2_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
277 "sys_pll3_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
278
279static const char *imx8mm_uart3_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
280 "sys_pll3_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
281
282static const char *imx8mm_uart4_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
283 "sys_pll3_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
284
285static const char *imx8mm_usb_core_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m",
286 "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
287
288static const char *imx8mm_usb_phy_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m",
289 "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
290
291static const char *imx8mm_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
292 "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
293
294static const char *imx8mm_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
295 "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
296
297static const char *imx8mm_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
298 "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out", };
299
300static const char *imx8mm_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
301 "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out", };
302
/*
 * PWM3 clock-root parent names. Slot 4 was "sys3_pll2_out", a name no
 * clock in this driver registers; all sibling PWM muxes use
 * "sys_pll3_out" in that position, so use the registered name here too.
 */
static const char *imx8mm_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
					 "sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", };
305
306static const char *imx8mm_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
307 "sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", };
308
309static const char *imx8mm_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", "sys_pll1_40m",
310 "video_pll1_out", "sys_pll1_800m", "audio_pll1_out", "clk_ext1" };
311
312static const char *imx8mm_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m", "vpu_pll_out",
313 "sys_pll2_125m", "sys_pll3_out", "sys_pll1_80m", "sys_pll2_166m", };
314
315static const char *imx8mm_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out", "sys_pll3_out", "sys_pll2_200m",
316 "sys_pll1_266m", "sys_pll2_500m", "sys_pll1_100m", };
317
318static const char *imx8mm_dsi_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
319 "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
320
321static const char *imx8mm_dsi_phy_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_100m", "sys_pll1_800m",
322 "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
323
324static const char *imx8mm_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_100m", "sys_pll1_800m",
325 "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
326
/*
 * uSDHC3 clock-root parent names. Slot 6 was "audio_pll2_clk", which is
 * never registered by this driver; usdhc1/usdhc2 use the registered
 * "audio_pll2_out" in the same slot.
 */
static const char *imx8mm_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
					   "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };
329
330static const char *imx8mm_csi1_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
331 "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
332
333static const char *imx8mm_csi1_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m",
334 "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
335
336static const char *imx8mm_csi1_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m",
337 "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
338
339static const char *imx8mm_csi2_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
340 "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
341
342static const char *imx8mm_csi2_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m",
343 "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
344
345static const char *imx8mm_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m",
346 "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
347
348static const char *imx8mm_pcie2_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m", "sys_pll1_266m",
349 "sys_pll1_800m", "sys_pll2_500m", "sys_pll2_333m", "sys_pll3_out", };
350
351static const char *imx8mm_pcie2_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m", "clk_ext1",
352 "clk_ext2", "clk_ext3", "clk_ext4", "sys_pll1_400m", };
353
354static const char *imx8mm_pcie2_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out",
355 "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m", };
356
357static const char *imx8mm_ecspi3_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
358 "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
359
360static const char *imx8mm_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out", "sys_pll1_800m",
361 "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
362
/*
 * VPU H1 clock-root parent names. "audio_pll2_clk" and "sys_pll3_clk"
 * are not names registered by this driver; the registered clocks are
 * "audio_pll2_out" and "sys_pll3_out" (as used by every other mux here).
 */
static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
					   "audio_pll2_out", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", };
365
366static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
367
/*
 * CLKO1 clock-root parent names. "audio_pll2_clk" is not a registered
 * name; the registered gate output is "audio_pll2_out".
 * NOTE(review): "vpu_pll" is the raw PLL, not its bypass output
 * "vpu_pll_out" used by the other muxes — confirm against the CCM
 * reference manual whether CLKO1 really taps the PLL directly.
 */
static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
					  "vpu_pll", "sys_pll1_80m", };
370
371static struct clk *clks[IMX8MM_CLK_END];
372static struct clk_onecell_data clk_data;
373
374static struct clk ** const uart_clks[] __initconst = {
375 &clks[IMX8MM_CLK_UART1_ROOT],
376 &clks[IMX8MM_CLK_UART2_ROOT],
377 &clks[IMX8MM_CLK_UART3_ROOT],
378 &clks[IMX8MM_CLK_UART4_ROOT],
379 NULL
380};
381
382static int __init imx8mm_clocks_init(struct device_node *ccm_node)
383{
384 struct device_node *np;
385 void __iomem *base;
386 int ret;
387
388 clks[IMX8MM_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
389 clks[IMX8MM_CLK_24M] = of_clk_get_by_name(ccm_node, "osc_24m");
390 clks[IMX8MM_CLK_32K] = of_clk_get_by_name(ccm_node, "osc_32k");
391 clks[IMX8MM_CLK_EXT1] = of_clk_get_by_name(ccm_node, "clk_ext1");
392 clks[IMX8MM_CLK_EXT2] = of_clk_get_by_name(ccm_node, "clk_ext2");
393 clks[IMX8MM_CLK_EXT3] = of_clk_get_by_name(ccm_node, "clk_ext3");
394 clks[IMX8MM_CLK_EXT4] = of_clk_get_by_name(ccm_node, "clk_ext4");
395
396 np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
397 base = of_iomap(np, 0);
398 if (WARN_ON(!base))
399 return -ENOMEM;
400
401 clks[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
402 clks[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
403 clks[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
404 clks[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
405 clks[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
406 clks[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
407 clks[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
408 clks[IMX8MM_SYS_PLL1_REF_SEL] = imx_clk_mux("sys_pll1_ref_sel", base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
409 clks[IMX8MM_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
410 clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
411
412 clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mm_audio_pll);
413 clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mm_audio_pll);
414 clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mm_video_pll);
415 clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mm_dram_pll);
416 clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mm_gpu_pll);
417 clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mm_vpu_pll);
418 clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mm_arm_pll);
419 clks[IMX8MM_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mm_sys_pll);
420 clks[IMX8MM_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mm_sys_pll);
421 clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mm_sys_pll);
422
423 /* PLL bypass out */
424 clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
425 clks[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
426 clks[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
427 clks[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
428 clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
429 clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
430 clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
431 clks[IMX8MM_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
432 clks[IMX8MM_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
433 clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
434
435 /* unbypass all the plls */
436 clk_set_parent(clks[IMX8MM_AUDIO_PLL1_BYPASS], clks[IMX8MM_AUDIO_PLL1]);
437 clk_set_parent(clks[IMX8MM_AUDIO_PLL2_BYPASS], clks[IMX8MM_AUDIO_PLL2]);
438 clk_set_parent(clks[IMX8MM_VIDEO_PLL1_BYPASS], clks[IMX8MM_VIDEO_PLL1]);
439 clk_set_parent(clks[IMX8MM_DRAM_PLL_BYPASS], clks[IMX8MM_DRAM_PLL]);
440 clk_set_parent(clks[IMX8MM_GPU_PLL_BYPASS], clks[IMX8MM_GPU_PLL]);
441 clk_set_parent(clks[IMX8MM_VPU_PLL_BYPASS], clks[IMX8MM_VPU_PLL]);
442 clk_set_parent(clks[IMX8MM_ARM_PLL_BYPASS], clks[IMX8MM_ARM_PLL]);
443 clk_set_parent(clks[IMX8MM_SYS_PLL1_BYPASS], clks[IMX8MM_SYS_PLL1]);
444 clk_set_parent(clks[IMX8MM_SYS_PLL2_BYPASS], clks[IMX8MM_SYS_PLL2]);
445 clk_set_parent(clks[IMX8MM_SYS_PLL3_BYPASS], clks[IMX8MM_SYS_PLL3]);
446
447 /* PLL out gate */
448 clks[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
449 clks[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
450 clks[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
451 clks[IMX8MM_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
452 clks[IMX8MM_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 13);
453 clks[IMX8MM_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 13);
454 clks[IMX8MM_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 13);
455 clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 13);
456 clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 13);
457 clks[IMX8MM_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 13);
458
459 /* SYS PLL fixed output */
460 clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
461 clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
462 clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
463 clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
464 clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
465 clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
466 clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
467 clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
468 clks[IMX8MM_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
469
470 clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
471 clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
472 clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
473 clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
474 clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
475 clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
476 clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
477 clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
478 clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
479
480 np = ccm_node;
481 base = of_iomap(np, 0);
482 if (WARN_ON(!base))
483 return -ENOMEM;
484
485 /* Core Slice */
486 clks[IMX8MM_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
487 clks[IMX8MM_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
488 clks[IMX8MM_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
489 clks[IMX8MM_CLK_GPU3D_SRC] = imx_clk_mux2("gpu3d_src", base + 0x8180, 24, 3, imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
490 clks[IMX8MM_CLK_GPU2D_SRC] = imx_clk_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels, ARRAY_SIZE(imx8mm_gpu2d_sels));
491 clks[IMX8MM_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
492 clks[IMX8MM_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
493 clks[IMX8MM_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
494 clks[IMX8MM_CLK_GPU3D_CG] = imx_clk_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
495 clks[IMX8MM_CLK_GPU2D_CG] = imx_clk_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
496 clks[IMX8MM_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
497 clks[IMX8MM_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
498 clks[IMX8MM_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
499 clks[IMX8MM_CLK_GPU3D_DIV] = imx_clk_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
500 clks[IMX8MM_CLK_GPU2D_DIV] = imx_clk_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
501
502 /* BUS */
503 clks[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
504 clks[IMX8MM_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
505 clks[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
506 clks[IMX8MM_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
507 clks[IMX8MM_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
508 clks[IMX8MM_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
509 clks[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
510 clks[IMX8MM_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
511 clks[IMX8MM_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
512 clks[IMX8MM_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
513 clks[IMX8MM_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
514 clks[IMX8MM_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
515
516 /* AHB */
517 clks[IMX8MM_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
518 clks[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
519
520 /* IPG */
521 clks[IMX8MM_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
522 clks[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
523
524 /* IP */
525 clks[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
526 clks[IMX8MM_CLK_DRAM_APB] = imx8m_clk_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
527 clks[IMX8MM_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
528 clks[IMX8MM_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180);
529 clks[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200);
530 clks[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280);
531 clks[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300);
532 clks[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
533 clks[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
534 clks[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
535 clks[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
536 clks[IMX8MM_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
537 clks[IMX8MM_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
538 clks[IMX8MM_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
539 clks[IMX8MM_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mm_sai4_sels, base + 0xa700);
540 clks[IMX8MM_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mm_sai5_sels, base + 0xa780);
541 clks[IMX8MM_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mm_sai6_sels, base + 0xa800);
542 clks[IMX8MM_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880);
543 clks[IMX8MM_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900);
544 clks[IMX8MM_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980);
545 clks[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00);
546 clks[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80);
547 clks[IMX8MM_CLK_NAND] = imx8m_clk_composite("nand", imx8mm_nand_sels, base + 0xab00);
548 clks[IMX8MM_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mm_qspi_sels, base + 0xab80);
549 clks[IMX8MM_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00);
550 clks[IMX8MM_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80);
551 clks[IMX8MM_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00);
552 clks[IMX8MM_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80);
553 clks[IMX8MM_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00);
554 clks[IMX8MM_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80);
555 clks[IMX8MM_CLK_UART1] = imx8m_clk_composite("uart1", imx8mm_uart1_sels, base + 0xaf00);
556 clks[IMX8MM_CLK_UART2] = imx8m_clk_composite("uart2", imx8mm_uart2_sels, base + 0xaf80);
557 clks[IMX8MM_CLK_UART3] = imx8m_clk_composite("uart3", imx8mm_uart3_sels, base + 0xb000);
558 clks[IMX8MM_CLK_UART4] = imx8m_clk_composite("uart4", imx8mm_uart4_sels, base + 0xb080);
559 clks[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100);
560 clks[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180);
561 clks[IMX8MM_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280);
562 clks[IMX8MM_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300);
563 clks[IMX8MM_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380);
564 clks[IMX8MM_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400);
565 clks[IMX8MM_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480);
566 clks[IMX8MM_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500);
567 clks[IMX8MM_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580);
568 clks[IMX8MM_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
569 clks[IMX8MM_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
570 clks[IMX8MM_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
571 clks[IMX8MM_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
572 clks[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
573 clks[IMX8MM_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
574 clks[IMX8MM_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80);
575 clks[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00);
576 clks[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80);
577 clks[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00);
578 clks[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80);
579 clks[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00);
580 clks[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80);
581 clks[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000);
582 clks[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080);
583 clks[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100);
584 clks[IMX8MM_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180);
585 clks[IMX8MM_CLK_PDM] = imx8m_clk_composite("pdm", imx8mm_pdm_sels, base + 0xc200);
586 clks[IMX8MM_CLK_VPU_H1] = imx8m_clk_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280);
587
588 /* CCGR */
589 clks[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
590 clks[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
591 clks[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
592 clks[IMX8MM_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
593 clks[IMX8MM_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
594 clks[IMX8MM_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
595 clks[IMX8MM_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
596 clks[IMX8MM_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
597 clks[IMX8MM_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
598 clks[IMX8MM_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
599 clks[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
600 clks[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
601 clks[IMX8MM_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
602 clks[IMX8MM_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
603 clks[IMX8MM_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
604 clks[IMX8MM_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
605 clks[IMX8MM_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
606 clks[IMX8MM_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
607 clks[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
608 clks[IMX8MM_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
609 clks[IMX8MM_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
610 clks[IMX8MM_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
611 clks[IMX8MM_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
612 clks[IMX8MM_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
613 clks[IMX8MM_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
614 clks[IMX8MM_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
615 clks[IMX8MM_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
616 clks[IMX8MM_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
617 clks[IMX8MM_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
618 clks[IMX8MM_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
619 clks[IMX8MM_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
620 clks[IMX8MM_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
621 clks[IMX8MM_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
622 clks[IMX8MM_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
623 clks[IMX8MM_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
624 clks[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
625 clks[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
626 clks[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
627 clks[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
628 clks[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
629 clks[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
630 clks[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
631 clks[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0);
632 clks[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
633 clks[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0);
634 clks[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0);
635 clks[IMX8MM_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
636 clks[IMX8MM_CLK_PDM_IPG] = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
637 clks[IMX8MM_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
638 clks[IMX8MM_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
639 clks[IMX8MM_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
640 clks[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
641 clks[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
642 clks[IMX8MM_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
643 clks[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0);
644 clks[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
645 clks[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
646 clks[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
647 clks[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
648 clks[IMX8MM_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
649
650 clks[IMX8MM_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc_24m", 1, 8);
651
652 clks[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
653 clks[IMX8MM_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
654
655 clks[IMX8MM_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
656 clks[IMX8MM_CLK_A53_DIV],
657 clks[IMX8MM_CLK_A53_SRC],
658 clks[IMX8MM_ARM_PLL_OUT],
659 clks[IMX8MM_CLK_24M]);
660
661 imx_check_clocks(clks, ARRAY_SIZE(clks));
662
663 clk_data.clks = clks;
664 clk_data.clk_num = ARRAY_SIZE(clks);
665 ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
666 if (ret < 0) {
667 pr_err("failed to register clks for i.MX8MM\n");
668 return -EINVAL;
669 }
670
671 imx_register_uart_clocks(uart_clks);
672
673 return 0;
674}
/*
 * Register imx8mm_clocks_init() as the early clock-provider init for
 * device-tree nodes matching "fsl,imx8mm-ccm".  NOTE(review): the _DRIVER
 * variant (rather than plain CLK_OF_DECLARE) presumably leaves the node
 * available for a later platform driver to bind as well — confirm against
 * the CLK_OF_DECLARE_DRIVER definition in clk-provider.h.
 */
675CLK_OF_DECLARE_DRIVER(imx8mm, "fsl,imx8mm-ccm", imx8mm_clocks_init);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 26b57f43ccc3..a9b3888aef0c 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -26,246 +26,246 @@ static u32 share_count_nand;
26 26
27static struct clk *clks[IMX8MQ_CLK_END]; 27static struct clk *clks[IMX8MQ_CLK_END];
28 28
29static const char *pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", }; 29static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
30static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", }; 30static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
31static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", }; 31static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
32static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", }; 32static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
33static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", }; 33static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
34static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", }; 34static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
35static const char *video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", }; 35static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
36 36
37static const char *sys1_pll1_out_sels[] = {"sys1_pll1", "sys1_pll1_ref_sel", }; 37static const char * const sys1_pll_out_sels[] = {"sys1_pll1_ref_sel", };
38static const char *sys2_pll1_out_sels[] = {"sys2_pll1", "sys1_pll1_ref_sel", }; 38static const char * const sys2_pll_out_sels[] = {"sys1_pll1_ref_sel", "sys2_pll1_ref_sel", };
39static const char *sys3_pll1_out_sels[] = {"sys3_pll1", "sys3_pll1_ref_sel", }; 39static const char * const sys3_pll_out_sels[] = {"sys3_pll1_ref_sel", "sys2_pll1_ref_sel", };
40static const char *dram_pll1_out_sels[] = {"dram_pll1", "dram_pll1_ref_sel", }; 40static const char * const dram_pll_out_sels[] = {"dram_pll1_ref_sel", };
41
42static const char *sys1_pll2_out_sels[] = {"sys1_pll2_div", "sys1_pll1_ref_sel", };
43static const char *sys2_pll2_out_sels[] = {"sys2_pll2_div", "sys2_pll1_ref_sel", };
44static const char *sys3_pll2_out_sels[] = {"sys3_pll2_div", "sys2_pll1_ref_sel", };
45static const char *dram_pll2_out_sels[] = {"dram_pll2_div", "dram_pll1_ref_sel", };
46 41
47/* CCM ROOT */ 42/* CCM ROOT */
48static const char *imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m", 43static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
49 "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll2_out", }; 44 "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll2_out", };
50 45
51static const char *imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m", 46static const char * const imx8mq_arm_m4_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_250m", "sys1_pll_266m",
47 "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", };
48
49static const char * const imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
52 "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "vpu_pll_out", }; 50 "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "vpu_pll_out", };
53 51
54static const char *imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out", 52static const char * const imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
55 "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; 53 "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
56 54
57static const char *imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out", 55static const char * const imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
58 "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; 56 "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
59 57
60static const char *imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m", 58static const char * const imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m",
61 "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "sys1_pll_100m",}; 59 "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "sys1_pll_100m",};
62 60
63static const char *imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m", 61static const char * const imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m",
64 "sys2_pll_200m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", }; 62 "sys2_pll_200m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", };
65 63
66static const char *imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m", 64static const char * const imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m",
67 "sys1_pll_133m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll1_out", }; 65 "sys1_pll_133m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll1_out", };
68 66
69static const char *imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", }; 67static const char * const imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", };
70 68
71static const char *imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", }; 69static const char * const imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", };
72 70
73static const char *imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", 71static const char * const imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out",
74 "sys1_pll_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", }; 72 "sys1_pll_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", };
75 73
76static const char *imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m", 74static const char * const imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m",
77 "audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", }; 75 "audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", };
78 76
79static const char *imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m", 77static const char * const imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m",
80 "sys2_pll_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", }; 78 "sys2_pll_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", };
81 79
82static const char *imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m", 80static const char * const imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
83 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; 81 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
84 82
85static const char *imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m", 83static const char * const imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
86 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; 84 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
87 85
88static const char *imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m", 86static const char * const imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m",
89 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; 87 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
90 88
91static const char *imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m", 89static const char * const imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m",
92 "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", }; 90 "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", };
93 91
94static const char *imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m", 92static const char * const imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m",
95 "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", }; 93 "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", };
96 94
97static const char *imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m", 95static const char * const imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m",
98 "sys2_pll_166m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", }; 96 "sys2_pll_166m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", };
99 97
100static const char *imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", 98static const char * const imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
101 "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out"}; 99 "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out"};
102 100
103static const char *imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m", 101static const char * const imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m",
104 "sys2_pll_250m", "sys1_pll_400m", "audio_pll1_out", "sys1_pll_266m", }; 102 "sys2_pll_250m", "sys1_pll_400m", "audio_pll1_out", "sys1_pll_266m", };
105 103
106static const char *imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", 104static const char * const imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
107 "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; 105 "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
108 106
109static const char *imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", }; 107static const char * const imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
110 108
111static const char *imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", }; 109static const char * const imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
112 110
113static const char *imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", }; 111static const char * const imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
114 112
115static const char *imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", }; 113static const char * const imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
116 114
117static const char *imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m", 115static const char * const imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
118 "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_250m", "sys3_pll2_out", }; 116 "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_250m", "sys3_pll2_out", };
119 117
120static const char *imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2", 118static const char * const imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2",
121 "clk_ext3", "clk_ext4", }; 119 "clk_ext3", "clk_ext4", };
122 120
123static const char *imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out", 121static const char * const imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out",
124 "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", }; 122 "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", };
125 123
126static const char *imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", }; 124static const char * const imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
127 125
128static const char *imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", }; 126static const char * const imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
129 127
130static const char *imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", }; 128static const char * const imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
131 129
132static const char *imx8mq_sai2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", }; 130static const char * const imx8mq_sai2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
133 131
134static const char *imx8mq_sai3_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", }; 132static const char * const imx8mq_sai3_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
135 133
136static const char *imx8mq_sai4_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", }; 134static const char * const imx8mq_sai4_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
137 135
138static const char *imx8mq_sai5_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", }; 136static const char * const imx8mq_sai5_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
139 137
140static const char *imx8mq_sai6_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", }; 138static const char * const imx8mq_sai6_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
141 139
142static const char *imx8mq_spdif1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", }; 140static const char * const imx8mq_spdif1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
143 141
144static const char *imx8mq_spdif2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", }; 142static const char * const imx8mq_spdif2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
145 143
146static const char *imx8mq_enet_ref_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m", 144static const char * const imx8mq_enet_ref_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m",
147 "sys1_pll_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", }; 145 "sys1_pll_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", };
148 146
149static const char *imx8mq_enet_timer_sels[] = {"osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2", 147static const char * const imx8mq_enet_timer_sels[] = {"osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
150 "clk_ext3", "clk_ext4", "video_pll1_out", }; 148 "clk_ext3", "clk_ext4", "video_pll1_out", };
151 149
152static const char *imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m", 150static const char * const imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m",
153 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; 151 "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
154 152
155static const char *imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m", 153static const char * const imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m",
156 "audio_pll2_out", "sys3_pll2_out", "sys2_pll_250m", "video_pll1_out", }; 154 "audio_pll2_out", "sys3_pll2_out", "sys2_pll_250m", "video_pll1_out", };
157 155
158static const char *imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", 156static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
159 "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; 157 "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
160 158
161static const char *imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", 159static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
162 "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; 160 "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
163 161
164static const char *imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", 162static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
165 "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; 163 "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
166 164
167static const char *imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", 165static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
168 "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; 166 "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
169 167
170static const char *imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", 168static const char * const imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
171 "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; 169 "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
172 170
173static const char *imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", 171static const char * const imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
174 "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; 172 "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
175 173
176static const char *imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", 174static const char * const imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
177 "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; 175 "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
178 176
179static const char *imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", 177static const char * const imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
180 "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; 178 "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
181 179
182static const char *imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", 180static const char * const imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
183 "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; 181 "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
184 182
185static const char *imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", 183static const char * const imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
186 "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; 184 "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
187 185
188static const char *imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", 186static const char * const imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
189 "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; 187 "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
190 188
191static const char *imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m", 189static const char * const imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
192 "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; 190 "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
193 191
194static const char *imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m", 192static const char * const imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
195 "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; 193 "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
196 194
197static const char *imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", 195static const char * const imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
198 "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; 196 "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
199 197
200static const char *imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", 198static const char * const imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
201 "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; 199 "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
202 200
203static const char *imx8mq_pwm1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", 201static const char * const imx8mq_pwm1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
204 "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; 202 "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", };
205 203
206static const char *imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", 204static const char * const imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
207 "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; 205 "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", };
208 206
209static const char *imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", 207static const char * const imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
210 "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; 208 "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", };
211 209
212static const char *imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", 210static const char * const imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
213 "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; 211 "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", };
214 212
215static const char *imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m", 213static const char * const imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m",
216 "sys1_pll_80m", "audio_pll1_out", "clk_ext1", }; 214 "sys1_pll_80m", "audio_pll1_out", "clk_ext1", };
217 215
218static const char *imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out", 216static const char * const imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out",
219 "sys2_pll_125m", "sys3_pll2_out", "sys1_pll_80m", "sys2_pll_166m", }; 217 "sys2_pll_125m", "sys3_pll2_out", "sys1_pll_80m", "sys2_pll_166m", };
220 218
221static const char *imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m", 219static const char * const imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m",
222 "sys1_pll_266m", "sys2_pll_500m", "sys1_pll_100m", }; 220 "sys1_pll_266m", "sys2_pll_500m", "sys1_pll_100m", };
223 221
224static const char *imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", 222static const char * const imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
225 "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; 223 "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
226 224
227static const char *imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", 225static const char * const imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
228 "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; 226 "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
229 227
230static const char *imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m", 228static const char * const imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m",
231 "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; 229 "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
232 230
233static const char *imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", 231static const char * const imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
234 "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; 232 "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
235 233
236static const char *imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", 234static const char * const imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
237 "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; 235 "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
238 236
239static const char *imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", 237static const char * const imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
240 "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; 238 "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
241 239
242static const char *imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", 240static const char * const imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
243 "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; 241 "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
244 242
245static const char *imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", 243static const char * const imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
246 "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; 244 "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
247 245
248static const char *imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", 246static const char * const imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
249 "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; 247 "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
250 248
251static const char *imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", 249static const char * const imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
252 "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; 250 "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
253 251
254static const char *imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m", 252static const char * const imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
255 "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_333m", "sys3_pll2_out", }; 253 "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_333m", "sys3_pll2_out", };
256 254
257static const char *imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", 255static const char * const imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1",
258 "clk_ext2", "clk_ext3", "clk_ext4", "sys1_pll_400m", }; 256 "clk_ext2", "clk_ext3", "clk_ext4", "sys1_pll_400m", };
259 257
260static const char *imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out", 258static const char * const imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out",
261 "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", }; 259 "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", };
262 260
263static const char *imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", 261static const char * const imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
264 "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; 262 "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
265static const char *imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; 263static const char * const imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
266 264
267static const char *imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m", "audio_pll1_out", 265static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "osc_27m", "sys1_pll_200m",
268 "video_pll1_out", "ckil", }; 266 "audio_pll2_out", "sys2_pll_500m", "vpu_pll_out", "sys1_pll_80m", };
267static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m",
268 "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", "ckil", };
269 269
270static struct clk_onecell_data clk_data; 270static struct clk_onecell_data clk_data;
271 271
@@ -308,10 +308,6 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
308 clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6); 308 clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
309 clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6); 309 clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
310 clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6); 310 clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
311 clks[IMX8MQ_SYS1_PLL1_REF_DIV] = imx_clk_divider("sys1_pll1_ref_div", "sys1_pll1_ref_sel", base + 0x38, 25, 3);
312 clks[IMX8MQ_SYS2_PLL1_REF_DIV] = imx_clk_divider("sys2_pll1_ref_div", "sys2_pll1_ref_sel", base + 0x44, 25, 3);
313 clks[IMX8MQ_SYS3_PLL1_REF_DIV] = imx_clk_divider("sys3_pll1_ref_div", "sys3_pll1_ref_sel", base + 0x50, 25, 3);
314 clks[IMX8MQ_DRAM_PLL1_REF_DIV] = imx_clk_divider("dram_pll1_ref_div", "dram_pll1_ref_sel", base + 0x68, 25, 3);
315 311
316 clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28); 312 clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
317 clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18); 313 clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
@@ -319,43 +315,15 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
319 clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0); 315 clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
320 clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8); 316 clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
321 clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10); 317 clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
322 clks[IMX8MQ_SYS1_PLL1] = imx_clk_sccg_pll("sys1_pll1", "sys1_pll1_ref_div", base + 0x30, SCCG_PLL1);
323 clks[IMX8MQ_SYS2_PLL1] = imx_clk_sccg_pll("sys2_pll1", "sys2_pll1_ref_div", base + 0x3c, SCCG_PLL1);
324 clks[IMX8MQ_SYS3_PLL1] = imx_clk_sccg_pll("sys3_pll1", "sys3_pll1_ref_div", base + 0x48, SCCG_PLL1);
325 clks[IMX8MQ_DRAM_PLL1] = imx_clk_sccg_pll("dram_pll1", "dram_pll1_ref_div", base + 0x60, SCCG_PLL1);
326
327 clks[IMX8MQ_SYS1_PLL2] = imx_clk_sccg_pll("sys1_pll2", "sys1_pll1_out_div", base + 0x30, SCCG_PLL2);
328 clks[IMX8MQ_SYS2_PLL2] = imx_clk_sccg_pll("sys2_pll2", "sys2_pll1_out_div", base + 0x3c, SCCG_PLL2);
329 clks[IMX8MQ_SYS3_PLL2] = imx_clk_sccg_pll("sys3_pll2", "sys3_pll1_out_div", base + 0x48, SCCG_PLL2);
330 clks[IMX8MQ_DRAM_PLL2] = imx_clk_sccg_pll("dram_pll2", "dram_pll1_out_div", base + 0x60, SCCG_PLL2);
331
332 /* PLL divs */
333 clks[IMX8MQ_SYS1_PLL1_OUT_DIV] = imx_clk_divider("sys1_pll1_out_div", "sys1_pll1_out", base + 0x38, 19, 6);
334 clks[IMX8MQ_SYS2_PLL1_OUT_DIV] = imx_clk_divider("sys2_pll1_out_div", "sys2_pll1_out", base + 0x44, 19, 6);
335 clks[IMX8MQ_SYS3_PLL1_OUT_DIV] = imx_clk_divider("sys3_pll1_out_div", "sys3_pll1_out", base + 0x50, 19, 6);
336 clks[IMX8MQ_DRAM_PLL1_OUT_DIV] = imx_clk_divider("dram_pll1_out_div", "dram_pll1_out", base + 0x68, 19, 6);
337 clks[IMX8MQ_SYS1_PLL2_DIV] = imx_clk_divider("sys1_pll2_div", "sys1_pll2", base + 0x38, 1, 6);
338 clks[IMX8MQ_SYS2_PLL2_DIV] = imx_clk_divider("sys2_pll2_div", "sys2_pll2", base + 0x44, 1, 6);
339 clks[IMX8MQ_SYS3_PLL2_DIV] = imx_clk_divider("sys3_pll2_div", "sys3_pll2", base + 0x50, 1, 6);
340 clks[IMX8MQ_DRAM_PLL2_DIV] = imx_clk_divider("dram_pll2_div", "dram_pll2", base + 0x68, 1, 6);
341 318
342 /* PLL bypass out */ 319 /* PLL bypass out */
343 clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels)); 320 clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
344 clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels)); 321 clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
345 clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels)); 322 clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
346 clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels)); 323 clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
347 clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels)); 324 clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
348 clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels)); 325 clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
349 326
350 clks[IMX8MQ_SYS1_PLL1_OUT] = imx_clk_mux("sys1_pll1_out", base + 0x30, 5, 1, sys1_pll1_out_sels, ARRAY_SIZE(sys1_pll1_out_sels));
351 clks[IMX8MQ_SYS2_PLL1_OUT] = imx_clk_mux("sys2_pll1_out", base + 0x3c, 5, 1, sys2_pll1_out_sels, ARRAY_SIZE(sys2_pll1_out_sels));
352 clks[IMX8MQ_SYS3_PLL1_OUT] = imx_clk_mux("sys3_pll1_out", base + 0x48, 5, 1, sys3_pll1_out_sels, ARRAY_SIZE(sys3_pll1_out_sels));
353 clks[IMX8MQ_DRAM_PLL1_OUT] = imx_clk_mux("dram_pll1_out", base + 0x60, 5, 1, dram_pll1_out_sels, ARRAY_SIZE(dram_pll1_out_sels));
354 clks[IMX8MQ_SYS1_PLL2_OUT] = imx_clk_mux("sys1_pll2_out", base + 0x30, 4, 1, sys1_pll2_out_sels, ARRAY_SIZE(sys1_pll2_out_sels));
355 clks[IMX8MQ_SYS2_PLL2_OUT] = imx_clk_mux("sys2_pll2_out", base + 0x3c, 4, 1, sys2_pll2_out_sels, ARRAY_SIZE(sys2_pll2_out_sels));
356 clks[IMX8MQ_SYS3_PLL2_OUT] = imx_clk_mux("sys3_pll2_out", base + 0x48, 4, 1, sys3_pll2_out_sels, ARRAY_SIZE(sys3_pll2_out_sels));
357 clks[IMX8MQ_DRAM_PLL2_OUT] = imx_clk_mux("dram_pll2_out", base + 0x60, 4, 1, dram_pll2_out_sels, ARRAY_SIZE(dram_pll2_out_sels));
358
359 /* PLL OUT GATE */ 327 /* PLL OUT GATE */
360 clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21); 328 clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
361 clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21); 329 clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
@@ -363,11 +331,11 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
363 clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21); 331 clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
364 clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21); 332 clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
365 clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21); 333 clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
366 clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_gate("sys1_pll_out", "sys1_pll2_out", base + 0x30, 9);
367 clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_gate("sys2_pll_out", "sys2_pll2_out", base + 0x3c, 9);
368 clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_gate("sys3_pll_out", "sys3_pll2_out", base + 0x48, 9);
369 clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll2_out", base + 0x60, 9);
370 334
335 clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_sccg_pll("sys1_pll_out", sys1_pll_out_sels, ARRAY_SIZE(sys1_pll_out_sels), 0, 0, 0, base + 0x30, CLK_IS_CRITICAL);
336 clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_sccg_pll("sys2_pll_out", sys2_pll_out_sels, ARRAY_SIZE(sys2_pll_out_sels), 0, 0, 1, base + 0x3c, CLK_IS_CRITICAL);
337 clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 1, base + 0x48, CLK_IS_CRITICAL);
338 clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_sccg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL);
371 /* SYS PLL fixed output */ 339 /* SYS PLL fixed output */
372 clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20); 340 clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20);
373 clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10); 341 clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10);
@@ -396,15 +364,19 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
396 364
397 /* CORE */ 365 /* CORE */
398 clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels)); 366 clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
367 clks[IMX8MQ_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
399 clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels)); 368 clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
400 clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels)); 369 clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
401 clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels)); 370 clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels));
371
402 clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL); 372 clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
373 clks[IMX8MQ_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
403 clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28); 374 clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
404 clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28); 375 clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
405 clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28); 376 clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
406 377
407 clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); 378 clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
379 clks[IMX8MQ_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
408 clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3); 380 clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
409 clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3); 381 clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
410 clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3); 382 clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
@@ -479,6 +451,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
479 clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580); 451 clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
480 clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900); 452 clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
481 clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980); 453 clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
454 clks[IMX8MQ_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mq_clko1_sels, base + 0xba00);
482 clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80); 455 clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
483 clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00); 456 clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
484 clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80); 457 clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
@@ -500,6 +473,11 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
500 clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0); 473 clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
501 clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0); 474 clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
502 clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0); 475 clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
476 clks[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
477 clks[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
478 clks[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
479 clks[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
480 clks[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
503 clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0); 481 clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
504 clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0); 482 clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
505 clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0); 483 clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
@@ -558,6 +536,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
558 clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8); 536 clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8);
559 clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4); 537 clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
560 538
539 clks[IMX8MQ_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
540 clks[IMX8MQ_CLK_A53_DIV],
541 clks[IMX8MQ_CLK_A53_SRC],
542 clks[IMX8MQ_ARM_PLL_OUT],
543 clks[IMX8MQ_SYS1_PLL_800M]);
544
561 for (i = 0; i < IMX8MQ_CLK_END; i++) 545 for (i = 0; i < IMX8MQ_CLK_END; i++)
562 if (IS_ERR(clks[i])) 546 if (IS_ERR(clks[i]))
563 pr_err("i.MX8mq clk %u register failed with %ld\n", 547 pr_err("i.MX8mq clk %u register failed with %ld\n",
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index 99c2508de8e5..fb6edf1b8aa2 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -169,6 +169,8 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
169 return -ENODEV; 169 return -ENODEV;
170 170
171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
172 if (!res)
173 return -EINVAL;
172 base = devm_ioremap(dev, res->start, resource_size(res)); 174 base = devm_ioremap(dev, res->start, resource_size(res));
173 if (!base) 175 if (!base)
174 return -ENOMEM; 176 return -ENOMEM;
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index 83e2ef96d81d..5e2903efc488 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -138,6 +138,7 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
138} 138}
139 139
140static const struct of_device_id imx8qxp_match[] = { 140static const struct of_device_id imx8qxp_match[] = {
141 { .compatible = "fsl,scu-clk", },
141 { .compatible = "fsl,imx8qxp-clk", }, 142 { .compatible = "fsl,imx8qxp-clk", },
142 { /* sentinel */ } 143 { /* sentinel */ }
143}; 144};
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
new file mode 100644
index 000000000000..1acfa3e3cfb4
--- /dev/null
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -0,0 +1,392 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright 2017-2018 NXP.
4 */
5
6#include <linux/bitops.h>
7#include <linux/clk-provider.h>
8#include <linux/err.h>
9#include <linux/io.h>
10#include <linux/iopoll.h>
11#include <linux/slab.h>
12#include <linux/jiffies.h>
13
14#include "clk.h"
15
/* PLL14xx register map: two (1416x) or three (1443x) 32-bit registers */
#define GNRL_CTL	0x0	/* general control: reset, bypass, lock status */
#define DIV_CTL		0x4	/* divider control: mdiv/pdiv/sdiv fields */
#define LOCK_STATUS	BIT(31)	/* set in GNRL_CTL when the PLL has locked */
#define LOCK_SEL_MASK	BIT(29)
#define CLKE_MASK	BIT(11)
#define RST_MASK	BIT(9)	/* RESETB: 1 = running, 0 = reset/power-down */
#define BYPASS_MASK	BIT(4)
/* Field masks below are positioned in-register (GENMASK over the span) */
#define MDIV_SHIFT	12
#define MDIV_MASK	GENMASK(21, 12)
#define PDIV_SHIFT	4
#define PDIV_MASK	GENMASK(9, 4)
#define SDIV_SHIFT	0
#define SDIV_MASK	GENMASK(2, 0)
#define KDIV_SHIFT	0
#define KDIV_MASK	GENMASK(15, 0)	/* 1443x only: signed 16-bit fractional part */

#define LOCK_TIMEOUT_US		10000	/* max time to wait for LOCK_STATUS */
33
/*
 * Runtime state for one PLL1416x/PLL1443x instance.
 */
struct clk_pll14xx {
	struct clk_hw			hw;		/* common clock framework handle */
	void __iomem			*base;		/* PLL register block base */
	enum imx_pll14xx_type		type;		/* PLL_1416X or PLL_1443X */
	const struct imx_pll14xx_rate_table *rate_table; /* pre-computed divider settings */
	int rate_count;					/* number of entries in rate_table */
};

/* Recover the driver struct from the clk_hw embedded in it. */
#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)
43
44static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
45 struct clk_pll14xx *pll, unsigned long rate)
46{
47 const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
48 int i;
49
50 for (i = 0; i < pll->rate_count; i++)
51 if (rate == rate_table[i].rate)
52 return &rate_table[i];
53
54 return NULL;
55}
56
57static long clk_pll14xx_round_rate(struct clk_hw *hw, unsigned long rate,
58 unsigned long *prate)
59{
60 struct clk_pll14xx *pll = to_clk_pll14xx(hw);
61 const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
62 int i;
63
64 /* Assumming rate_table is in descending order */
65 for (i = 0; i < pll->rate_count; i++)
66 if (rate >= rate_table[i].rate)
67 return rate_table[i].rate;
68
69 /* return minimum supported value */
70 return rate_table[i - 1].rate;
71}
72
/*
 * Compute the PLL1416x (integer PLL) output rate from hardware:
 * fout = Fin * mdiv / (pdiv * 2^sdiv).
 */
static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div;
	u64 fvco = parent_rate;	/* 64-bit: the mdiv multiply can exceed 32 bits */

	/* NOTE(review): pll_gnrl is read but never used — confirm the read is required */
	pll_gnrl = readl_relaxed(pll->base);
	pll_div = readl_relaxed(pll->base + 4);
	/* Masks are in-register; mask first, then shift the field down */
	mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
	pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
	sdiv = (pll_div & SDIV_MASK) >> SDIV_SHIFT;

	fvco *= mdiv;
	do_div(fvco, pdiv << sdiv);

	return fvco;
}
91
/*
 * Compute the PLL1443x (fractional PLL) output rate from hardware:
 * fout = (m * 65536 + k) * Fin / (p * 65536 * 2^s).
 * kdiv is a signed 16-bit fractional correction, hence the short int
 * (the assignment sign-extends the raw field).
 */
static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div_ctl0, pll_div_ctl1;
	short int kdiv;
	u64 fvco = parent_rate;	/* 64-bit intermediate to avoid overflow */

	/* NOTE(review): pll_gnrl is read but never used — confirm the read is required */
	pll_gnrl = readl_relaxed(pll->base);
	pll_div_ctl0 = readl_relaxed(pll->base + 4);
	pll_div_ctl1 = readl_relaxed(pll->base + 8);
	/* Masks are in-register; mask first, then shift the field down */
	mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
	pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
	sdiv = (pll_div_ctl0 & SDIV_MASK) >> SDIV_SHIFT;
	kdiv = pll_div_ctl1 & KDIV_MASK;	/* sign-extended into short */

	/* fvco = (m * 65536 + k) * Fin / (p * 65536) */
	fvco *= (mdiv * 65536 + kdiv);
	pdiv *= 65536;

	do_div(fvco, pdiv << sdiv);

	return fvco;
}
116
117static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate,
118 u32 pll_div)
119{
120 u32 old_mdiv, old_pdiv;
121
122 old_mdiv = (pll_div >> MDIV_SHIFT) & MDIV_MASK;
123 old_pdiv = (pll_div >> PDIV_SHIFT) & PDIV_MASK;
124
125 return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
126}
127
128static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate,
129 u32 pll_div_ctl0, u32 pll_div_ctl1)
130{
131 u32 old_mdiv, old_pdiv, old_kdiv;
132
133 old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK;
134 old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK;
135 old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK;
136
137 return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
138 rate->kdiv != old_kdiv;
139}
140
141static inline bool clk_pll1443x_mp_change(const struct imx_pll14xx_rate_table *rate,
142 u32 pll_div_ctl0, u32 pll_div_ctl1)
143{
144 u32 old_mdiv, old_pdiv, old_kdiv;
145
146 old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK;
147 old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK;
148 old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK;
149
150 return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
151 rate->kdiv != old_kdiv;
152}
153
154static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
155{
156 u32 val;
157
158 return readl_poll_timeout(pll->base, val, val & LOCK_TIMEOUT_US, 0,
159 LOCK_TIMEOUT_US);
160}
161
/*
 * Program the PLL1416x to one of the table-supported rates.
 *
 * Fast path: if only sdiv changes, rewrite DIV_CTL on the fly.
 * Slow path: select output lock, hold the PLL in reset, program the
 * dividers, release reset, wait for lock, then clear bypass.
 * Returns 0 on success, -EINVAL for an unsupported rate, or the
 * wait-lock error.
 */
static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
		       drate, clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + 4);

	if (!clk_pll1416x_mp_change(rate, tmp)) {
		/*
		 * Only sdiv differs: update it without a reset cycle.
		 * (SDIV_SHIFT is 0, so the ~mask-then-shift precedence here
		 * is harmless.)
		 */
		tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
		tmp |= rate->sdiv << SDIV_SHIFT;
		writel_relaxed(tmp, pll->base + 4);

		return 0;
	}

	/* Bypass clock and set lock to pll output lock */
	tmp = readl_relaxed(pll->base);
	tmp |= LOCK_SEL_MASK;
	writel_relaxed(tmp, pll->base);

	/* Enable RST */
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Program all three divider fields in one DIV_CTL write */
	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
		(rate->sdiv << SDIV_SHIFT);
	writel_relaxed(div_val, pll->base + 0x4);

	/*
	 * According to SPEC, t3 - t2 need to be greater than
	 * 1us and 1/FREF, respectively.
	 * FREF is FIN / Prediv, the prediv is [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	/* NOTE(review): BYPASS is cleared here but never set on this path — confirm intended */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base);

	return 0;
}
223
/*
 * Program the PLL1443x (fractional PLL) to a table-supported rate.
 *
 * Fast path: an sdiv-only change is applied on the fly. Slow path:
 * hold the PLL in reset, program m/p/s and the fractional kdiv,
 * release reset, wait for lock, then clear bypass.
 * Returns 0 on success, -EINVAL for an unsupported rate, or the
 * wait-lock error.
 */
static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
		       drate, clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + 4);
	div_val = readl_relaxed(pll->base + 8);

	if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) {
		/* Only sdiv differs: update it without a reset cycle */
		tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
		tmp |= rate->sdiv << SDIV_SHIFT;
		writel_relaxed(tmp, pll->base + 4);

		return 0;
	}

	/* Enable RST */
	tmp = readl_relaxed(pll->base);
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Integer dividers in DIV_CTL0, fractional kdiv in DIV_CTL1 */
	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
		(rate->sdiv << SDIV_SHIFT);
	writel_relaxed(div_val, pll->base + 0x4);
	writel_relaxed(rate->kdiv << KDIV_SHIFT, pll->base + 0x8);

	/*
	 * According to SPEC, t3 - t2 need to be greater than
	 * 1us and 1/FREF, respectively.
	 * FREF is FIN / Prediv, the prediv is [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Wait Lock*/
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	/* NOTE(review): BYPASS is cleared here but never set on this path — confirm intended */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base);

	return 0;
}
283
284static int clk_pll14xx_prepare(struct clk_hw *hw)
285{
286 struct clk_pll14xx *pll = to_clk_pll14xx(hw);
287 u32 val;
288
289 /*
290 * RESETB = 1 from 0, PLL starts its normal
291 * operation after lock time
292 */
293 val = readl_relaxed(pll->base + GNRL_CTL);
294 val |= RST_MASK;
295 writel_relaxed(val, pll->base + GNRL_CTL);
296
297 return clk_pll14xx_wait_lock(pll);
298}
299
300static int clk_pll14xx_is_prepared(struct clk_hw *hw)
301{
302 struct clk_pll14xx *pll = to_clk_pll14xx(hw);
303 u32 val;
304
305 val = readl_relaxed(pll->base + GNRL_CTL);
306
307 return (val & RST_MASK) ? 1 : 0;
308}
309
310static void clk_pll14xx_unprepare(struct clk_hw *hw)
311{
312 struct clk_pll14xx *pll = to_clk_pll14xx(hw);
313 u32 val;
314
315 /*
316 * Set RST to 0, power down mode is enabled and
317 * every digital block is reset
318 */
319 val = readl_relaxed(pll->base + GNRL_CTL);
320 val &= ~RST_MASK;
321 writel_relaxed(val, pll->base + GNRL_CTL);
322}
323
/* Full ops for a PLL1416x that has a rate table (rate is settable). */
static const struct clk_ops clk_pll1416x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll1416x_recalc_rate,
	.round_rate	= clk_pll14xx_round_rate,
	.set_rate	= clk_pll1416x_set_rate,
};

/* Read-only ops used when no rate table is supplied for a PLL1416x. */
static const struct clk_ops clk_pll1416x_min_ops = {
	.recalc_rate	= clk_pll1416x_recalc_rate,
};

/* Full ops for the fractional PLL1443x. */
static const struct clk_ops clk_pll1443x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll1443x_recalc_rate,
	.round_rate	= clk_pll14xx_round_rate,
	.set_rate	= clk_pll1443x_set_rate,
};
345
346struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
347 void __iomem *base,
348 const struct imx_pll14xx_clk *pll_clk)
349{
350 struct clk_pll14xx *pll;
351 struct clk *clk;
352 struct clk_init_data init;
353
354 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
355 if (!pll)
356 return ERR_PTR(-ENOMEM);
357
358 init.name = name;
359 init.flags = pll_clk->flags;
360 init.parent_names = &parent_name;
361 init.num_parents = 1;
362
363 switch (pll_clk->type) {
364 case PLL_1416X:
365 if (!pll->rate_table)
366 init.ops = &clk_pll1416x_min_ops;
367 else
368 init.ops = &clk_pll1416x_ops;
369 break;
370 case PLL_1443X:
371 init.ops = &clk_pll1443x_ops;
372 break;
373 default:
374 pr_err("%s: Unknown pll type for pll clk %s\n",
375 __func__, name);
376 };
377
378 pll->base = base;
379 pll->hw.init = &init;
380 pll->type = pll_clk->type;
381 pll->rate_table = pll_clk->rate_table;
382 pll->rate_count = pll_clk->rate_count;
383
384 clk = clk_register(NULL, &pll->hw);
385 if (IS_ERR(clk)) {
386 pr_err("%s: failed to register pll %s %lu\n",
387 __func__, name, PTR_ERR(clk));
388 kfree(pll);
389 }
390
391 return clk;
392}
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
index ee7752bace89..9dfd03a95557 100644
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ b/drivers/clk/imx/clk-sccg-pll.c
@@ -25,87 +25,292 @@
25#define PLL_DIVF2_MASK GENMASK(12, 7) 25#define PLL_DIVF2_MASK GENMASK(12, 7)
26#define PLL_DIVR1_MASK GENMASK(27, 25) 26#define PLL_DIVR1_MASK GENMASK(27, 25)
27#define PLL_DIVR2_MASK GENMASK(24, 19) 27#define PLL_DIVR2_MASK GENMASK(24, 19)
28#define PLL_DIVQ_MASK GENMASK(6, 1)
28#define PLL_REF_MASK GENMASK(2, 0) 29#define PLL_REF_MASK GENMASK(2, 0)
29 30
30#define PLL_LOCK_MASK BIT(31) 31#define PLL_LOCK_MASK BIT(31)
31#define PLL_PD_MASK BIT(7) 32#define PLL_PD_MASK BIT(7)
32 33
33#define OSC_25M 25000000 34/* These are the specification limits for the SSCG PLL */
34#define OSC_27M 27000000 35#define PLL_REF_MIN_FREQ 25000000UL
36#define PLL_REF_MAX_FREQ 235000000UL
35 37
36#define PLL_SCCG_LOCK_TIMEOUT 70 38#define PLL_STAGE1_MIN_FREQ 1600000000UL
39#define PLL_STAGE1_MAX_FREQ 2400000000UL
40
41#define PLL_STAGE1_REF_MIN_FREQ 25000000UL
42#define PLL_STAGE1_REF_MAX_FREQ 54000000UL
43
44#define PLL_STAGE2_MIN_FREQ 1200000000UL
45#define PLL_STAGE2_MAX_FREQ 2400000000UL
46
47#define PLL_STAGE2_REF_MIN_FREQ 54000000UL
48#define PLL_STAGE2_REF_MAX_FREQ 75000000UL
49
50#define PLL_OUT_MIN_FREQ 20000000UL
51#define PLL_OUT_MAX_FREQ 1200000000UL
52
53#define PLL_DIVR1_MAX 7
54#define PLL_DIVR2_MAX 63
55#define PLL_DIVF1_MAX 63
56#define PLL_DIVF2_MAX 63
57#define PLL_DIVQ_MAX 63
58
59#define PLL_BYPASS_NONE 0x0
60#define PLL_BYPASS1 0x2
61#define PLL_BYPASS2 0x1
62
63#define SSCG_PLL_BYPASS1_MASK BIT(5)
64#define SSCG_PLL_BYPASS2_MASK BIT(4)
65#define SSCG_PLL_BYPASS_MASK GENMASK(5, 4)
66
67#define PLL_SCCG_LOCK_TIMEOUT 70
68
69struct clk_sccg_pll_setup {
70 int divr1, divf1;
71 int divr2, divf2;
72 int divq;
73 int bypass;
74
75 uint64_t vco1;
76 uint64_t vco2;
77 uint64_t fout;
78 uint64_t ref;
79 uint64_t ref_div1;
80 uint64_t ref_div2;
81 uint64_t fout_request;
82 int fout_error;
83};
37 84
38struct clk_sccg_pll { 85struct clk_sccg_pll {
39 struct clk_hw hw; 86 struct clk_hw hw;
40 void __iomem *base; 87 const struct clk_ops ops;
88
89 void __iomem *base;
90
91 struct clk_sccg_pll_setup setup;
92
93 u8 parent;
94 u8 bypass1;
95 u8 bypass2;
41}; 96};
42 97
43#define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw) 98#define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw)
44 99
45static int clk_pll_wait_lock(struct clk_sccg_pll *pll) 100static int clk_sccg_pll_wait_lock(struct clk_sccg_pll *pll)
46{ 101{
47 u32 val; 102 u32 val;
48 103
49 return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK, 0, 104 val = readl_relaxed(pll->base + PLL_CFG0);
50 PLL_SCCG_LOCK_TIMEOUT); 105
106 /* don't wait for lock if all plls are bypassed */
107 if (!(val & SSCG_PLL_BYPASS2_MASK))
108 return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK,
109 0, PLL_SCCG_LOCK_TIMEOUT);
110
111 return 0;
51} 112}
52 113
53static int clk_pll1_is_prepared(struct clk_hw *hw) 114static int clk_sccg_pll2_check_match(struct clk_sccg_pll_setup *setup,
115 struct clk_sccg_pll_setup *temp_setup)
54{ 116{
55 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); 117 int new_diff = temp_setup->fout - temp_setup->fout_request;
56 u32 val; 118 int diff = temp_setup->fout_error;
57 119
58 val = readl_relaxed(pll->base + PLL_CFG0); 120 if (abs(diff) > abs(new_diff)) {
59 return (val & PLL_PD_MASK) ? 0 : 1; 121 temp_setup->fout_error = new_diff;
122 memcpy(setup, temp_setup, sizeof(struct clk_sccg_pll_setup));
123
124 if (temp_setup->fout_request == temp_setup->fout)
125 return 0;
126 }
127 return -1;
60} 128}
61 129
62static unsigned long clk_pll1_recalc_rate(struct clk_hw *hw, 130static int clk_sccg_divq_lookup(struct clk_sccg_pll_setup *setup,
63 unsigned long parent_rate) 131 struct clk_sccg_pll_setup *temp_setup)
64{ 132{
65 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); 133 int ret = -EINVAL;
66 u32 val, divf; 134
135 for (temp_setup->divq = 0; temp_setup->divq <= PLL_DIVQ_MAX;
136 temp_setup->divq++) {
137 temp_setup->vco2 = temp_setup->vco1;
138 do_div(temp_setup->vco2, temp_setup->divr2 + 1);
139 temp_setup->vco2 *= 2;
140 temp_setup->vco2 *= temp_setup->divf2 + 1;
141 if (temp_setup->vco2 >= PLL_STAGE2_MIN_FREQ &&
142 temp_setup->vco2 <= PLL_STAGE2_MAX_FREQ) {
143 temp_setup->fout = temp_setup->vco2;
144 do_div(temp_setup->fout, 2 * (temp_setup->divq + 1));
145
146 ret = clk_sccg_pll2_check_match(setup, temp_setup);
147 if (!ret) {
148 temp_setup->bypass = PLL_BYPASS1;
149 return ret;
150 }
151 }
152 }
67 153
68 val = readl_relaxed(pll->base + PLL_CFG2); 154 return ret;
69 divf = FIELD_GET(PLL_DIVF1_MASK, val); 155}
156
157static int clk_sccg_divf2_lookup(struct clk_sccg_pll_setup *setup,
158 struct clk_sccg_pll_setup *temp_setup)
159{
160 int ret = -EINVAL;
161
162 for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX;
163 temp_setup->divf2++) {
164 ret = clk_sccg_divq_lookup(setup, temp_setup);
165 if (!ret)
166 return ret;
167 }
70 168
71 return parent_rate * 2 * (divf + 1); 169 return ret;
72} 170}
73 171
74static long clk_pll1_round_rate(struct clk_hw *hw, unsigned long rate, 172static int clk_sccg_divr2_lookup(struct clk_sccg_pll_setup *setup,
75 unsigned long *prate) 173 struct clk_sccg_pll_setup *temp_setup)
76{ 174{
77 unsigned long parent_rate = *prate; 175 int ret = -EINVAL;
78 u32 div; 176
177 for (temp_setup->divr2 = 0; temp_setup->divr2 <= PLL_DIVR2_MAX;
178 temp_setup->divr2++) {
179 temp_setup->ref_div2 = temp_setup->vco1;
180 do_div(temp_setup->ref_div2, temp_setup->divr2 + 1);
181 if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ &&
182 temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) {
183 ret = clk_sccg_divf2_lookup(setup, temp_setup);
184 if (!ret)
185 return ret;
186 }
187 }
188
189 return ret;
190}
191
192static int clk_sccg_pll2_find_setup(struct clk_sccg_pll_setup *setup,
193 struct clk_sccg_pll_setup *temp_setup,
194 uint64_t ref)
195{
196
197 int ret = -EINVAL;
79 198
80 if (!parent_rate) 199 if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
81 return 0; 200 return ret;
82 201
83 div = rate / (parent_rate * 2); 202 temp_setup->vco1 = ref;
84 203
85 return parent_rate * div * 2; 204 ret = clk_sccg_divr2_lookup(setup, temp_setup);
205 return ret;
86} 206}
87 207
88static int clk_pll1_set_rate(struct clk_hw *hw, unsigned long rate, 208static int clk_sccg_divf1_lookup(struct clk_sccg_pll_setup *setup,
89 unsigned long parent_rate) 209 struct clk_sccg_pll_setup *temp_setup)
90{ 210{
91 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); 211 int ret = -EINVAL;
92 u32 val;
93 u32 divf;
94 212
95 if (!parent_rate) 213 for (temp_setup->divf1 = 0; temp_setup->divf1 <= PLL_DIVF1_MAX;
96 return -EINVAL; 214 temp_setup->divf1++) {
215 uint64_t vco1 = temp_setup->ref;
97 216
98 divf = rate / (parent_rate * 2); 217 do_div(vco1, temp_setup->divr1 + 1);
218 vco1 *= 2;
219 vco1 *= temp_setup->divf1 + 1;
99 220
100 val = readl_relaxed(pll->base + PLL_CFG2); 221 ret = clk_sccg_pll2_find_setup(setup, temp_setup, vco1);
101 val &= ~PLL_DIVF1_MASK; 222 if (!ret) {
102 val |= FIELD_PREP(PLL_DIVF1_MASK, divf - 1); 223 temp_setup->bypass = PLL_BYPASS_NONE;
103 writel_relaxed(val, pll->base + PLL_CFG2); 224 return ret;
225 }
226 }
227
228 return ret;
229}
230
231static int clk_sccg_divr1_lookup(struct clk_sccg_pll_setup *setup,
232 struct clk_sccg_pll_setup *temp_setup)
233{
234 int ret = -EINVAL;
235
236 for (temp_setup->divr1 = 0; temp_setup->divr1 <= PLL_DIVR1_MAX;
237 temp_setup->divr1++) {
238 temp_setup->ref_div1 = temp_setup->ref;
239 do_div(temp_setup->ref_div1, temp_setup->divr1 + 1);
240 if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ &&
241 temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) {
242 ret = clk_sccg_divf1_lookup(setup, temp_setup);
243 if (!ret)
244 return ret;
245 }
246 }
247
248 return ret;
249}
250
251static int clk_sccg_pll1_find_setup(struct clk_sccg_pll_setup *setup,
252 struct clk_sccg_pll_setup *temp_setup,
253 uint64_t ref)
254{
255
256 int ret = -EINVAL;
257
258 if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
259 return ret;
260
261 temp_setup->ref = ref;
262
263 ret = clk_sccg_divr1_lookup(setup, temp_setup);
264
265 return ret;
266}
267
268static int clk_sccg_pll_find_setup(struct clk_sccg_pll_setup *setup,
269 uint64_t prate,
270 uint64_t rate, int try_bypass)
271{
272 struct clk_sccg_pll_setup temp_setup;
273 int ret = -EINVAL;
274
275 memset(&temp_setup, 0, sizeof(struct clk_sccg_pll_setup));
276 memset(setup, 0, sizeof(struct clk_sccg_pll_setup));
277
278 temp_setup.fout_error = PLL_OUT_MAX_FREQ;
279 temp_setup.fout_request = rate;
280
281 switch (try_bypass) {
104 282
105 return clk_pll_wait_lock(pll); 283 case PLL_BYPASS2:
284 if (prate == rate) {
285 setup->bypass = PLL_BYPASS2;
286 setup->fout = rate;
287 ret = 0;
288 }
289 break;
290
291 case PLL_BYPASS1:
292 ret = clk_sccg_pll2_find_setup(setup, &temp_setup, prate);
293 break;
294
295 case PLL_BYPASS_NONE:
296 ret = clk_sccg_pll1_find_setup(setup, &temp_setup, prate);
297 break;
298 }
299
300 return ret;
301}
302
303
304static int clk_sccg_pll_is_prepared(struct clk_hw *hw)
305{
306 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
307
308 u32 val = readl_relaxed(pll->base + PLL_CFG0);
309
310 return (val & PLL_PD_MASK) ? 0 : 1;
106} 311}
107 312
108static int clk_pll1_prepare(struct clk_hw *hw) 313static int clk_sccg_pll_prepare(struct clk_hw *hw)
109{ 314{
110 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); 315 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
111 u32 val; 316 u32 val;
@@ -114,10 +319,10 @@ static int clk_pll1_prepare(struct clk_hw *hw)
114 val &= ~PLL_PD_MASK; 319 val &= ~PLL_PD_MASK;
115 writel_relaxed(val, pll->base + PLL_CFG0); 320 writel_relaxed(val, pll->base + PLL_CFG0);
116 321
117 return clk_pll_wait_lock(pll); 322 return clk_sccg_pll_wait_lock(pll);
118} 323}
119 324
120static void clk_pll1_unprepare(struct clk_hw *hw) 325static void clk_sccg_pll_unprepare(struct clk_hw *hw)
121{ 326{
122 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); 327 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
123 u32 val; 328 u32 val;
@@ -125,121 +330,208 @@ static void clk_pll1_unprepare(struct clk_hw *hw)
125 val = readl_relaxed(pll->base + PLL_CFG0); 330 val = readl_relaxed(pll->base + PLL_CFG0);
126 val |= PLL_PD_MASK; 331 val |= PLL_PD_MASK;
127 writel_relaxed(val, pll->base + PLL_CFG0); 332 writel_relaxed(val, pll->base + PLL_CFG0);
128
129} 333}
130 334
131static unsigned long clk_pll2_recalc_rate(struct clk_hw *hw, 335static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw,
132 unsigned long parent_rate) 336 unsigned long parent_rate)
133{ 337{
134 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); 338 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
135 u32 val, ref, divr1, divf1, divr2, divf2; 339 u32 val, divr1, divf1, divr2, divf2, divq;
136 u64 temp64; 340 u64 temp64;
137 341
138 val = readl_relaxed(pll->base + PLL_CFG0);
139 switch (FIELD_GET(PLL_REF_MASK, val)) {
140 case 0:
141 ref = OSC_25M;
142 break;
143 case 1:
144 ref = OSC_27M;
145 break;
146 default:
147 ref = OSC_25M;
148 break;
149 }
150
151 val = readl_relaxed(pll->base + PLL_CFG2); 342 val = readl_relaxed(pll->base + PLL_CFG2);
152 divr1 = FIELD_GET(PLL_DIVR1_MASK, val); 343 divr1 = FIELD_GET(PLL_DIVR1_MASK, val);
153 divr2 = FIELD_GET(PLL_DIVR2_MASK, val); 344 divr2 = FIELD_GET(PLL_DIVR2_MASK, val);
154 divf1 = FIELD_GET(PLL_DIVF1_MASK, val); 345 divf1 = FIELD_GET(PLL_DIVF1_MASK, val);
155 divf2 = FIELD_GET(PLL_DIVF2_MASK, val); 346 divf2 = FIELD_GET(PLL_DIVF2_MASK, val);
156 347 divq = FIELD_GET(PLL_DIVQ_MASK, val);
157 temp64 = ref * 2; 348
158 temp64 *= (divf1 + 1) * (divf2 + 1); 349 temp64 = parent_rate;
159 350
160 do_div(temp64, (divr1 + 1) * (divr2 + 1)); 351 val = clk_readl(pll->base + PLL_CFG0);
352 if (val & SSCG_PLL_BYPASS2_MASK) {
353 temp64 = parent_rate;
354 } else if (val & SSCG_PLL_BYPASS1_MASK) {
355 temp64 *= divf2;
356 do_div(temp64, (divr2 + 1) * (divq + 1));
357 } else {
358 temp64 *= 2;
359 temp64 *= (divf1 + 1) * (divf2 + 1);
360 do_div(temp64, (divr1 + 1) * (divr2 + 1) * (divq + 1));
361 }
161 362
162 return temp64; 363 return temp64;
163} 364}
164 365
165static long clk_pll2_round_rate(struct clk_hw *hw, unsigned long rate, 366static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
166 unsigned long *prate) 367 unsigned long parent_rate)
167{ 368{
168 u32 div; 369 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
169 unsigned long parent_rate = *prate; 370 struct clk_sccg_pll_setup *setup = &pll->setup;
371 u32 val;
170 372
171 if (!parent_rate) 373 /* set bypass here too since the parent might be the same */
172 return 0; 374 val = clk_readl(pll->base + PLL_CFG0);
375 val &= ~SSCG_PLL_BYPASS_MASK;
376 val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
377 clk_writel(val, pll->base + PLL_CFG0);
173 378
174 div = rate / parent_rate; 379 val = readl_relaxed(pll->base + PLL_CFG2);
380 val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
381 val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK);
382 val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1);
383 val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2);
384 val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1);
385 val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2);
386 val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
387 writel_relaxed(val, pll->base + PLL_CFG2);
175 388
176 return parent_rate * div; 389 return clk_sccg_pll_wait_lock(pll);
177} 390}
178 391
179static int clk_pll2_set_rate(struct clk_hw *hw, unsigned long rate, 392static u8 clk_sccg_pll_get_parent(struct clk_hw *hw)
180 unsigned long parent_rate)
181{ 393{
394 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
182 u32 val; 395 u32 val;
183 u32 divf; 396 u8 ret = pll->parent;
397
398 val = clk_readl(pll->base + PLL_CFG0);
399 if (val & SSCG_PLL_BYPASS2_MASK)
400 ret = pll->bypass2;
401 else if (val & SSCG_PLL_BYPASS1_MASK)
402 ret = pll->bypass1;
403 return ret;
404}
405
406static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
407{
184 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); 408 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
409 u32 val;
185 410
186 if (!parent_rate) 411 val = clk_readl(pll->base + PLL_CFG0);
187 return -EINVAL; 412 val &= ~SSCG_PLL_BYPASS_MASK;
413 val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
414 clk_writel(val, pll->base + PLL_CFG0);
188 415
189 divf = rate / parent_rate; 416 return clk_sccg_pll_wait_lock(pll);
417}
190 418
191 val = readl_relaxed(pll->base + PLL_CFG2); 419static int __clk_sccg_pll_determine_rate(struct clk_hw *hw,
192 val &= ~PLL_DIVF2_MASK; 420 struct clk_rate_request *req,
193 val |= FIELD_PREP(PLL_DIVF2_MASK, divf - 1); 421 uint64_t min,
194 writel_relaxed(val, pll->base + PLL_CFG2); 422 uint64_t max,
423 uint64_t rate,
424 int bypass)
425{
426 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
427 struct clk_sccg_pll_setup *setup = &pll->setup;
428 struct clk_hw *parent_hw = NULL;
429 int bypass_parent_index;
430 int ret = -EINVAL;
431
432 req->max_rate = max;
433 req->min_rate = min;
434
435 switch (bypass) {
436 case PLL_BYPASS2:
437 bypass_parent_index = pll->bypass2;
438 break;
439 case PLL_BYPASS1:
440 bypass_parent_index = pll->bypass1;
441 break;
442 default:
443 bypass_parent_index = pll->parent;
444 break;
445 }
446
447 parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index);
448 ret = __clk_determine_rate(parent_hw, req);
449 if (!ret) {
450 ret = clk_sccg_pll_find_setup(setup, req->rate,
451 rate, bypass);
452 }
453
454 req->best_parent_hw = parent_hw;
455 req->best_parent_rate = req->rate;
456 req->rate = setup->fout;
195 457
196 return clk_pll_wait_lock(pll); 458 return ret;
197} 459}
198 460
199static const struct clk_ops clk_sccg_pll1_ops = { 461static int clk_sccg_pll_determine_rate(struct clk_hw *hw,
200 .is_prepared = clk_pll1_is_prepared, 462 struct clk_rate_request *req)
201 .recalc_rate = clk_pll1_recalc_rate, 463{
202 .round_rate = clk_pll1_round_rate, 464 struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
203 .set_rate = clk_pll1_set_rate, 465 struct clk_sccg_pll_setup *setup = &pll->setup;
204}; 466 uint64_t rate = req->rate;
467 uint64_t min = req->min_rate;
468 uint64_t max = req->max_rate;
469 int ret = -EINVAL;
470
471 if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
472 return ret;
473
474 ret = __clk_sccg_pll_determine_rate(hw, req, req->rate, req->rate,
475 rate, PLL_BYPASS2);
476 if (!ret)
477 return ret;
478
479 ret = __clk_sccg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
480 PLL_STAGE1_REF_MAX_FREQ, rate,
481 PLL_BYPASS1);
482 if (!ret)
483 return ret;
484
485 ret = __clk_sccg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
486 PLL_REF_MAX_FREQ, rate,
487 PLL_BYPASS_NONE);
488 if (!ret)
489 return ret;
490
491 if (setup->fout >= min && setup->fout <= max)
492 ret = 0;
493
494 return ret;
495}
205 496
206static const struct clk_ops clk_sccg_pll2_ops = { 497static const struct clk_ops clk_sccg_pll_ops = {
207 .prepare = clk_pll1_prepare, 498 .prepare = clk_sccg_pll_prepare,
208 .unprepare = clk_pll1_unprepare, 499 .unprepare = clk_sccg_pll_unprepare,
209 .recalc_rate = clk_pll2_recalc_rate, 500 .is_prepared = clk_sccg_pll_is_prepared,
210 .round_rate = clk_pll2_round_rate, 501 .recalc_rate = clk_sccg_pll_recalc_rate,
211 .set_rate = clk_pll2_set_rate, 502 .set_rate = clk_sccg_pll_set_rate,
503 .set_parent = clk_sccg_pll_set_parent,
504 .get_parent = clk_sccg_pll_get_parent,
505 .determine_rate = clk_sccg_pll_determine_rate,
212}; 506};
213 507
214struct clk *imx_clk_sccg_pll(const char *name, 508struct clk *imx_clk_sccg_pll(const char *name,
215 const char *parent_name, 509 const char * const *parent_names,
510 u8 num_parents,
511 u8 parent, u8 bypass1, u8 bypass2,
216 void __iomem *base, 512 void __iomem *base,
217 enum imx_sccg_pll_type pll_type) 513 unsigned long flags)
218{ 514{
219 struct clk_sccg_pll *pll; 515 struct clk_sccg_pll *pll;
220 struct clk_init_data init; 516 struct clk_init_data init;
221 struct clk_hw *hw; 517 struct clk_hw *hw;
222 int ret; 518 int ret;
223 519
224 switch (pll_type) {
225 case SCCG_PLL1:
226 init.ops = &clk_sccg_pll1_ops;
227 break;
228 case SCCG_PLL2:
229 init.ops = &clk_sccg_pll2_ops;
230 break;
231 default:
232 return ERR_PTR(-EINVAL);
233 }
234
235 pll = kzalloc(sizeof(*pll), GFP_KERNEL); 520 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
236 if (!pll) 521 if (!pll)
237 return ERR_PTR(-ENOMEM); 522 return ERR_PTR(-ENOMEM);
238 523
524 pll->parent = parent;
525 pll->bypass1 = bypass1;
526 pll->bypass2 = bypass2;
527
528 pll->base = base;
239 init.name = name; 529 init.name = name;
240 init.flags = 0; 530 init.ops = &clk_sccg_pll_ops;
241 init.parent_names = &parent_name; 531
242 init.num_parents = 1; 532 init.flags = flags;
533 init.parent_names = parent_names;
534 init.num_parents = num_parents;
243 535
244 pll->base = base; 536 pll->base = base;
245 pll->hw.init = &init; 537 pll->hw.init = &init;
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index 7ccf7edfe11c..fbef740704d0 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -4,12 +4,17 @@
4 * Dong Aisheng <aisheng.dong@nxp.com> 4 * Dong Aisheng <aisheng.dong@nxp.com>
5 */ 5 */
6 6
7#include <dt-bindings/firmware/imx/rsrc.h>
8#include <linux/arm-smccc.h>
7#include <linux/clk-provider.h> 9#include <linux/clk-provider.h>
8#include <linux/err.h> 10#include <linux/err.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
10 12
11#include "clk-scu.h" 13#include "clk-scu.h"
12 14
15#define IMX_SIP_CPUFREQ 0xC2000001
16#define IMX_SIP_SET_CPUFREQ 0x00
17
13static struct imx_sc_ipc *ccm_ipc_handle; 18static struct imx_sc_ipc *ccm_ipc_handle;
14 19
15/* 20/*
@@ -66,6 +71,41 @@ struct imx_sc_msg_get_clock_rate {
66}; 71};
67 72
68/* 73/*
74 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
75 * @hdr: SCU protocol header
76 * @req: get parent request protocol
77 * @resp: get parent response protocol
78 *
79 * This structure describes the SCU protocol of clock get parent
80 */
81struct imx_sc_msg_get_clock_parent {
82 struct imx_sc_rpc_msg hdr;
83 union {
84 struct req_get_clock_parent {
85 __le16 resource;
86 u8 clk;
87 } __packed req;
88 struct resp_get_clock_parent {
89 u8 parent;
90 } resp;
91 } data;
92};
93
94/*
95 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
96 * @hdr: SCU protocol header
97 * @req: set parent request protocol
98 *
99 * This structure describes the SCU protocol of clock set parent
100 */
101struct imx_sc_msg_set_clock_parent {
102 struct imx_sc_rpc_msg hdr;
103 __le16 resource;
104 u8 clk;
105 u8 parent;
106} __packed;
107
108/*
69 * struct imx_sc_msg_req_clock_enable - clock gate protocol 109 * struct imx_sc_msg_req_clock_enable - clock gate protocol
70 * @hdr: SCU protocol header 110 * @hdr: SCU protocol header
71 * @resource: clock resource to gate 111 * @resource: clock resource to gate
@@ -145,6 +185,25 @@ static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
145 return rate; 185 return rate;
146} 186}
147 187
188static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
189 unsigned long parent_rate)
190{
191 struct clk_scu *clk = to_clk_scu(hw);
192 struct arm_smccc_res res;
193 unsigned long cluster_id;
194
195 if (clk->rsrc_id == IMX_SC_R_A35)
196 cluster_id = 0;
197 else
198 return -EINVAL;
199
200 /* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
201 arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
202 cluster_id, rate, 0, 0, 0, 0, &res);
203
204 return 0;
205}
206
148/* 207/*
149 * clk_scu_set_rate - Set rate for a SCU clock 208 * clk_scu_set_rate - Set rate for a SCU clock
150 * @hw: clock to change rate for 209 * @hw: clock to change rate for
@@ -173,6 +232,49 @@ static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
173 return imx_scu_call_rpc(ccm_ipc_handle, &msg, true); 232 return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
174} 233}
175 234
235static u8 clk_scu_get_parent(struct clk_hw *hw)
236{
237 struct clk_scu *clk = to_clk_scu(hw);
238 struct imx_sc_msg_get_clock_parent msg;
239 struct imx_sc_rpc_msg *hdr = &msg.hdr;
240 int ret;
241
242 hdr->ver = IMX_SC_RPC_VERSION;
243 hdr->svc = IMX_SC_RPC_SVC_PM;
244 hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
245 hdr->size = 2;
246
247 msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
248 msg.data.req.clk = clk->clk_type;
249
250 ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
251 if (ret) {
252 pr_err("%s: failed to get clock parent %d\n",
253 clk_hw_get_name(hw), ret);
254 return 0;
255 }
256
257 return msg.data.resp.parent;
258}
259
260static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
261{
262 struct clk_scu *clk = to_clk_scu(hw);
263 struct imx_sc_msg_set_clock_parent msg;
264 struct imx_sc_rpc_msg *hdr = &msg.hdr;
265
266 hdr->ver = IMX_SC_RPC_VERSION;
267 hdr->svc = IMX_SC_RPC_SVC_PM;
268 hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
269 hdr->size = 2;
270
271 msg.resource = cpu_to_le16(clk->rsrc_id);
272 msg.clk = clk->clk_type;
273 msg.parent = index;
274
275 return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
276}
277
176static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource, 278static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
177 u8 clk, bool enable, bool autog) 279 u8 clk, bool enable, bool autog)
178{ 280{
@@ -228,11 +330,22 @@ static const struct clk_ops clk_scu_ops = {
228 .recalc_rate = clk_scu_recalc_rate, 330 .recalc_rate = clk_scu_recalc_rate,
229 .round_rate = clk_scu_round_rate, 331 .round_rate = clk_scu_round_rate,
230 .set_rate = clk_scu_set_rate, 332 .set_rate = clk_scu_set_rate,
333 .get_parent = clk_scu_get_parent,
334 .set_parent = clk_scu_set_parent,
335 .prepare = clk_scu_prepare,
336 .unprepare = clk_scu_unprepare,
337};
338
339static const struct clk_ops clk_scu_cpu_ops = {
340 .recalc_rate = clk_scu_recalc_rate,
341 .round_rate = clk_scu_round_rate,
342 .set_rate = clk_scu_atf_set_cpu_rate,
231 .prepare = clk_scu_prepare, 343 .prepare = clk_scu_prepare,
232 .unprepare = clk_scu_unprepare, 344 .unprepare = clk_scu_unprepare,
233}; 345};
234 346
235struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type) 347struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
348 int num_parents, u32 rsrc_id, u8 clk_type)
236{ 349{
237 struct clk_init_data init; 350 struct clk_init_data init;
238 struct clk_scu *clk; 351 struct clk_scu *clk;
@@ -248,7 +361,13 @@ struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type)
248 361
249 init.name = name; 362 init.name = name;
250 init.ops = &clk_scu_ops; 363 init.ops = &clk_scu_ops;
251 init.num_parents = 0; 364 if (rsrc_id == IMX_SC_R_A35)
365 init.ops = &clk_scu_cpu_ops;
366 else
367 init.ops = &clk_scu_ops;
368 init.parent_names = parents;
369 init.num_parents = num_parents;
370
252 /* 371 /*
253 * Note on MX8, the clocks are tightly coupled with power domain 372 * Note on MX8, the clocks are tightly coupled with power domain
254 * that once the power domain is off, the clock status may be 373 * that once the power domain is off, the clock status may be
diff --git a/drivers/clk/imx/clk-scu.h b/drivers/clk/imx/clk-scu.h
index 52c1746ec988..2bcfaf06a458 100644
--- a/drivers/clk/imx/clk-scu.h
+++ b/drivers/clk/imx/clk-scu.h
@@ -10,7 +10,21 @@
10#include <linux/firmware/imx/sci.h> 10#include <linux/firmware/imx/sci.h>
11 11
12int imx_clk_scu_init(void); 12int imx_clk_scu_init(void);
13struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type); 13
14struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
15 int num_parents, u32 rsrc_id, u8 clk_type);
16
17static inline struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id,
18 u8 clk_type)
19{
20 return __imx_clk_scu(name, NULL, 0, rsrc_id, clk_type);
21}
22
23static inline struct clk_hw *imx_clk_scu2(const char *name, const char * const *parents,
24 int num_parents, u32 rsrc_id, u8 clk_type)
25{
26 return __imx_clk_scu(name, parents, num_parents, rsrc_id, clk_type);
27}
14 28
15struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name, 29struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
16 unsigned long flags, void __iomem *reg, 30 unsigned long flags, void __iomem *reg,
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 6dae54325a91..a334667c450a 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -203,6 +203,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
203 np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop"); 203 np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
204 anatop_base = of_iomap(np, 0); 204 anatop_base = of_iomap(np, 0);
205 BUG_ON(!anatop_base); 205 BUG_ON(!anatop_base);
206 of_node_put(np);
206 207
207 np = ccm_node; 208 np = ccm_node;
208 ccm_base = of_iomap(np, 0); 209 ccm_base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 028312de21b8..5748ec8673e4 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -27,6 +27,30 @@ enum imx_sccg_pll_type {
27 SCCG_PLL2, 27 SCCG_PLL2,
28}; 28};
29 29
30enum imx_pll14xx_type {
31 PLL_1416X,
32 PLL_1443X,
33};
34
35/* NOTE: Rate table should be kept sorted in descending order. */
36struct imx_pll14xx_rate_table {
37 unsigned int rate;
38 unsigned int pdiv;
39 unsigned int mdiv;
40 unsigned int sdiv;
41 unsigned int kdiv;
42};
43
44struct imx_pll14xx_clk {
45 enum imx_pll14xx_type type;
46 const struct imx_pll14xx_rate_table *rate_table;
47 int rate_count;
48 int flags;
49};
50
51struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
52 void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
53
30struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name, 54struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
31 const char *parent, void __iomem *base); 55 const char *parent, void __iomem *base);
32 56
@@ -36,9 +60,12 @@ struct clk *imx_clk_pllv2(const char *name, const char *parent,
36struct clk *imx_clk_frac_pll(const char *name, const char *parent_name, 60struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
37 void __iomem *base); 61 void __iomem *base);
38 62
39struct clk *imx_clk_sccg_pll(const char *name, const char *parent_name, 63struct clk *imx_clk_sccg_pll(const char *name,
40 void __iomem *base, 64 const char * const *parent_names,
41 enum imx_sccg_pll_type pll_type); 65 u8 num_parents,
66 u8 parent, u8 bypass1, u8 bypass2,
67 void __iomem *base,
68 unsigned long flags);
42 69
43enum imx_pllv3_type { 70enum imx_pllv3_type {
44 IMX_PLLV3_GENERIC, 71 IMX_PLLV3_GENERIC,
@@ -329,7 +356,8 @@ static inline struct clk *imx_clk_mux_flags(const char *name,
329} 356}
330 357
331static inline struct clk *imx_clk_mux2_flags(const char *name, 358static inline struct clk *imx_clk_mux2_flags(const char *name,
332 void __iomem *reg, u8 shift, u8 width, const char **parents, 359 void __iomem *reg, u8 shift, u8 width,
360 const char * const *parents,
333 int num_parents, unsigned long flags) 361 int num_parents, unsigned long flags)
334{ 362{
335 return clk_register_mux(NULL, name, parents, num_parents, 363 return clk_register_mux(NULL, name, parents, num_parents,
@@ -354,7 +382,7 @@ struct clk *imx_clk_cpu(const char *name, const char *parent_name,
354 struct clk *step); 382 struct clk *step);
355 383
356struct clk *imx8m_clk_composite_flags(const char *name, 384struct clk *imx8m_clk_composite_flags(const char *name,
357 const char **parent_names, 385 const char * const *parent_names,
358 int num_parents, void __iomem *reg, 386 int num_parents, void __iomem *reg,
359 unsigned long flags); 387 unsigned long flags);
360 388
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 991d4093726e..2895a5ae814d 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -1463,7 +1463,6 @@ static struct platform_driver clk_mt2712_drv = {
1463 .probe = clk_mt2712_probe, 1463 .probe = clk_mt2712_probe,
1464 .driver = { 1464 .driver = {
1465 .name = "clk-mt2712", 1465 .name = "clk-mt2712",
1466 .owner = THIS_MODULE,
1467 .of_match_table = of_match_clk_mt2712, 1466 .of_match_table = of_match_clk_mt2712,
1468 }, 1467 },
1469}; 1468};
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index efaa70f682b4..3858747f5438 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -1,27 +1,52 @@
1config COMMON_CLK_AMLOGIC 1config COMMON_CLK_MESON_INPUT
2 bool 2 tristate
3 depends on ARCH_MESON || COMPILE_TEST
4 select COMMON_CLK_REGMAP_MESON
5 3
6config COMMON_CLK_AMLOGIC_AUDIO 4config COMMON_CLK_MESON_REGMAP
7 bool 5 tristate
8 depends on ARCH_MESON || COMPILE_TEST 6 select REGMAP
9 select COMMON_CLK_AMLOGIC
10 7
11config COMMON_CLK_MESON_AO 8config COMMON_CLK_MESON_DUALDIV
12 bool 9 tristate
13 depends on OF 10 select COMMON_CLK_MESON_REGMAP
14 depends on ARCH_MESON || COMPILE_TEST 11
15 select COMMON_CLK_REGMAP_MESON 12config COMMON_CLK_MESON_MPLL
13 tristate
14 select COMMON_CLK_MESON_REGMAP
15
16config COMMON_CLK_MESON_PHASE
17 tristate
18 select COMMON_CLK_MESON_REGMAP
19
20config COMMON_CLK_MESON_PLL
21 tristate
22 select COMMON_CLK_MESON_REGMAP
23
24config COMMON_CLK_MESON_SCLK_DIV
25 tristate
26 select COMMON_CLK_MESON_REGMAP
27
28config COMMON_CLK_MESON_VID_PLL_DIV
29 tristate
30 select COMMON_CLK_MESON_REGMAP
31
32config COMMON_CLK_MESON_AO_CLKC
33 tristate
34 select COMMON_CLK_MESON_REGMAP
35 select COMMON_CLK_MESON_INPUT
16 select RESET_CONTROLLER 36 select RESET_CONTROLLER
17 37
18config COMMON_CLK_REGMAP_MESON 38config COMMON_CLK_MESON_EE_CLKC
19 bool 39 tristate
20 select REGMAP 40 select COMMON_CLK_MESON_REGMAP
41 select COMMON_CLK_MESON_INPUT
21 42
22config COMMON_CLK_MESON8B 43config COMMON_CLK_MESON8B
23 bool 44 bool
24 select COMMON_CLK_AMLOGIC 45 depends on ARCH_MESON
46 select COMMON_CLK_MESON_REGMAP
47 select COMMON_CLK_MESON_MPLL
48 select COMMON_CLK_MESON_PLL
49 select MFD_SYSCON
25 select RESET_CONTROLLER 50 select RESET_CONTROLLER
26 help 51 help
27 Support for the clock controller on AmLogic S802 (Meson8), 52 Support for the clock controller on AmLogic S802 (Meson8),
@@ -30,8 +55,14 @@ config COMMON_CLK_MESON8B
30 55
31config COMMON_CLK_GXBB 56config COMMON_CLK_GXBB
32 bool 57 bool
33 select COMMON_CLK_AMLOGIC 58 depends on ARCH_MESON
34 select COMMON_CLK_MESON_AO 59 select COMMON_CLK_MESON_REGMAP
60 select COMMON_CLK_MESON_DUALDIV
61 select COMMON_CLK_MESON_VID_PLL_DIV
62 select COMMON_CLK_MESON_MPLL
63 select COMMON_CLK_MESON_PLL
64 select COMMON_CLK_MESON_AO_CLKC
65 select COMMON_CLK_MESON_EE_CLKC
35 select MFD_SYSCON 66 select MFD_SYSCON
36 help 67 help
37 Support for the clock controller on AmLogic S905 devices, aka gxbb. 68 Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -39,8 +70,13 @@ config COMMON_CLK_GXBB
39 70
40config COMMON_CLK_AXG 71config COMMON_CLK_AXG
41 bool 72 bool
42 select COMMON_CLK_AMLOGIC 73 depends on ARCH_MESON
43 select COMMON_CLK_MESON_AO 74 select COMMON_CLK_MESON_REGMAP
75 select COMMON_CLK_MESON_DUALDIV
76 select COMMON_CLK_MESON_MPLL
77 select COMMON_CLK_MESON_PLL
78 select COMMON_CLK_MESON_AO_CLKC
79 select COMMON_CLK_MESON_EE_CLKC
44 select MFD_SYSCON 80 select MFD_SYSCON
45 help 81 help
46 Support for the clock controller on AmLogic A113D devices, aka axg. 82 Support for the clock controller on AmLogic A113D devices, aka axg.
@@ -48,9 +84,26 @@ config COMMON_CLK_AXG
48 84
49config COMMON_CLK_AXG_AUDIO 85config COMMON_CLK_AXG_AUDIO
50 tristate "Meson AXG Audio Clock Controller Driver" 86 tristate "Meson AXG Audio Clock Controller Driver"
51 depends on COMMON_CLK_AXG 87 depends on ARCH_MESON
52 select COMMON_CLK_AMLOGIC_AUDIO 88 select COMMON_CLK_MESON_INPUT
53 select MFD_SYSCON 89 select COMMON_CLK_MESON_REGMAP
90 select COMMON_CLK_MESON_PHASE
91 select COMMON_CLK_MESON_SCLK_DIV
92 select REGMAP_MMIO
54 help 93 help
55 Support for the audio clock controller on AmLogic A113D devices, 94 Support for the audio clock controller on AmLogic A113D devices,
56 aka axg, Say Y if you want audio subsystem to work. 95 aka axg, Say Y if you want audio subsystem to work.
96
97config COMMON_CLK_G12A
98 bool
99 depends on ARCH_MESON
100 select COMMON_CLK_MESON_REGMAP
101 select COMMON_CLK_MESON_DUALDIV
102 select COMMON_CLK_MESON_MPLL
103 select COMMON_CLK_MESON_PLL
104 select COMMON_CLK_MESON_AO_CLKC
105 select COMMON_CLK_MESON_EE_CLKC
106 select MFD_SYSCON
107 help
108 Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
109 devices, aka g12a. Say Y if you want peripherals to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index a849aa809825..021fc290e749 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -1,13 +1,20 @@
1# 1# Amlogic clock drivers
2# Makefile for Meson specific clk
3#
4 2
5obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o vid-pll-div.o 3obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o
6obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-input.o 4obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o
7obj-$(CONFIG_COMMON_CLK_AMLOGIC_AUDIO) += clk-triphase.o sclk-div.o 5obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
8obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o 6obj-$(CONFIG_COMMON_CLK_MESON_INPUT) += clk-input.o
7obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
8obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
9obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
10obj-$(CONFIG_COMMON_CLK_MESON_REGMAP) += clk-regmap.o
11obj-$(CONFIG_COMMON_CLK_MESON_SCLK_DIV) += sclk-div.o
12obj-$(CONFIG_COMMON_CLK_MESON_VID_PLL_DIV) += vid-pll-div.o
13
14# Amlogic Clock controllers
15
16obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
17obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
18obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
19obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o
9obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o 20obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
10obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
11obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
12obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
13obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index 29e088542387..0086f31288eb 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -12,10 +12,27 @@
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/reset-controller.h> 13#include <linux/reset-controller.h>
14#include <linux/mfd/syscon.h> 14#include <linux/mfd/syscon.h>
15#include "clk-regmap.h"
16#include "meson-aoclk.h" 15#include "meson-aoclk.h"
17#include "axg-aoclk.h" 16#include "axg-aoclk.h"
18 17
18#include "clk-regmap.h"
19#include "clk-dualdiv.h"
20
21#define IN_PREFIX "ao-in-"
22
23/*
24 * AO Configuration Clock registers offsets
25 * Register offsets from the data sheet must be multiplied by 4.
26 */
27#define AO_RTI_PWR_CNTL_REG1 0x0C
28#define AO_RTI_PWR_CNTL_REG0 0x10
29#define AO_RTI_GEN_CNTL_REG0 0x40
30#define AO_OSCIN_CNTL 0x58
31#define AO_CRT_CLK_CNTL1 0x68
32#define AO_SAR_CLK 0x90
33#define AO_RTC_ALT_CLK_CNTL0 0x94
34#define AO_RTC_ALT_CLK_CNTL1 0x98
35
19#define AXG_AO_GATE(_name, _bit) \ 36#define AXG_AO_GATE(_name, _bit) \
20static struct clk_regmap axg_aoclk_##_name = { \ 37static struct clk_regmap axg_aoclk_##_name = { \
21 .data = &(struct clk_regmap_gate_data) { \ 38 .data = &(struct clk_regmap_gate_data) { \
@@ -25,7 +42,7 @@ static struct clk_regmap axg_aoclk_##_name = { \
25 .hw.init = &(struct clk_init_data) { \ 42 .hw.init = &(struct clk_init_data) { \
26 .name = "axg_ao_" #_name, \ 43 .name = "axg_ao_" #_name, \
27 .ops = &clk_regmap_gate_ops, \ 44 .ops = &clk_regmap_gate_ops, \
28 .parent_names = (const char *[]){ "clk81" }, \ 45 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
29 .num_parents = 1, \ 46 .num_parents = 1, \
30 .flags = CLK_IGNORE_UNUSED, \ 47 .flags = CLK_IGNORE_UNUSED, \
31 }, \ 48 }, \
@@ -39,17 +56,141 @@ AXG_AO_GATE(uart2, 5);
39AXG_AO_GATE(ir_blaster, 6); 56AXG_AO_GATE(ir_blaster, 6);
40AXG_AO_GATE(saradc, 7); 57AXG_AO_GATE(saradc, 7);
41 58
59static struct clk_regmap axg_aoclk_cts_oscin = {
60 .data = &(struct clk_regmap_gate_data){
61 .offset = AO_RTI_PWR_CNTL_REG0,
62 .bit_idx = 14,
63 },
64 .hw.init = &(struct clk_init_data){
65 .name = "cts_oscin",
66 .ops = &clk_regmap_gate_ro_ops,
67 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
68 .num_parents = 1,
69 },
70};
71
72static struct clk_regmap axg_aoclk_32k_pre = {
73 .data = &(struct clk_regmap_gate_data){
74 .offset = AO_RTC_ALT_CLK_CNTL0,
75 .bit_idx = 31,
76 },
77 .hw.init = &(struct clk_init_data){
78 .name = "axg_ao_32k_pre",
79 .ops = &clk_regmap_gate_ops,
80 .parent_names = (const char *[]){ "cts_oscin" },
81 .num_parents = 1,
82 },
83};
84
85static const struct meson_clk_dualdiv_param axg_32k_div_table[] = {
86 {
87 .dual = 1,
88 .n1 = 733,
89 .m1 = 8,
90 .n2 = 732,
91 .m2 = 11,
92 }, {}
93};
94
95static struct clk_regmap axg_aoclk_32k_div = {
96 .data = &(struct meson_clk_dualdiv_data){
97 .n1 = {
98 .reg_off = AO_RTC_ALT_CLK_CNTL0,
99 .shift = 0,
100 .width = 12,
101 },
102 .n2 = {
103 .reg_off = AO_RTC_ALT_CLK_CNTL0,
104 .shift = 12,
105 .width = 12,
106 },
107 .m1 = {
108 .reg_off = AO_RTC_ALT_CLK_CNTL1,
109 .shift = 0,
110 .width = 12,
111 },
112 .m2 = {
113 .reg_off = AO_RTC_ALT_CLK_CNTL1,
114 .shift = 12,
115 .width = 12,
116 },
117 .dual = {
118 .reg_off = AO_RTC_ALT_CLK_CNTL0,
119 .shift = 28,
120 .width = 1,
121 },
122 .table = axg_32k_div_table,
123 },
124 .hw.init = &(struct clk_init_data){
125 .name = "axg_ao_32k_div",
126 .ops = &meson_clk_dualdiv_ops,
127 .parent_names = (const char *[]){ "axg_ao_32k_pre" },
128 .num_parents = 1,
129 },
130};
131
132static struct clk_regmap axg_aoclk_32k_sel = {
133 .data = &(struct clk_regmap_mux_data) {
134 .offset = AO_RTC_ALT_CLK_CNTL1,
135 .mask = 0x1,
136 .shift = 24,
137 .flags = CLK_MUX_ROUND_CLOSEST,
138 },
139 .hw.init = &(struct clk_init_data){
140 .name = "axg_ao_32k_sel",
141 .ops = &clk_regmap_mux_ops,
142 .parent_names = (const char *[]){ "axg_ao_32k_div",
143 "axg_ao_32k_pre" },
144 .num_parents = 2,
145 .flags = CLK_SET_RATE_PARENT,
146 },
147};
148
149static struct clk_regmap axg_aoclk_32k = {
150 .data = &(struct clk_regmap_gate_data){
151 .offset = AO_RTC_ALT_CLK_CNTL0,
152 .bit_idx = 30,
153 },
154 .hw.init = &(struct clk_init_data){
155 .name = "axg_ao_32k",
156 .ops = &clk_regmap_gate_ops,
157 .parent_names = (const char *[]){ "axg_ao_32k_sel" },
158 .num_parents = 1,
159 .flags = CLK_SET_RATE_PARENT,
160 },
161};
162
163static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
164 .data = &(struct clk_regmap_mux_data) {
165 .offset = AO_RTI_PWR_CNTL_REG0,
166 .mask = 0x1,
167 .shift = 10,
168 .flags = CLK_MUX_ROUND_CLOSEST,
169 },
170 .hw.init = &(struct clk_init_data){
171 .name = "axg_ao_cts_rtc_oscin",
172 .ops = &clk_regmap_mux_ops,
173 .parent_names = (const char *[]){ "axg_ao_32k",
174 IN_PREFIX "ext_32k-0" },
175 .num_parents = 2,
176 .flags = CLK_SET_RATE_PARENT,
177 },
178};
179
42static struct clk_regmap axg_aoclk_clk81 = { 180static struct clk_regmap axg_aoclk_clk81 = {
43 .data = &(struct clk_regmap_mux_data) { 181 .data = &(struct clk_regmap_mux_data) {
44 .offset = AO_RTI_PWR_CNTL_REG0, 182 .offset = AO_RTI_PWR_CNTL_REG0,
45 .mask = 0x1, 183 .mask = 0x1,
46 .shift = 8, 184 .shift = 8,
185 .flags = CLK_MUX_ROUND_CLOSEST,
47 }, 186 },
48 .hw.init = &(struct clk_init_data){ 187 .hw.init = &(struct clk_init_data){
49 .name = "axg_ao_clk81", 188 .name = "axg_ao_clk81",
50 .ops = &clk_regmap_mux_ro_ops, 189 .ops = &clk_regmap_mux_ro_ops,
51 .parent_names = (const char *[]){ "clk81", "ao_alt_xtal"}, 190 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
191 "axg_ao_cts_rtc_oscin"},
52 .num_parents = 2, 192 .num_parents = 2,
193 .flags = CLK_SET_RATE_PARENT,
53 }, 194 },
54}; 195};
55 196
@@ -62,7 +203,8 @@ static struct clk_regmap axg_aoclk_saradc_mux = {
62 .hw.init = &(struct clk_init_data){ 203 .hw.init = &(struct clk_init_data){
63 .name = "axg_ao_saradc_mux", 204 .name = "axg_ao_saradc_mux",
64 .ops = &clk_regmap_mux_ops, 205 .ops = &clk_regmap_mux_ops,
65 .parent_names = (const char *[]){ "xtal", "axg_ao_clk81" }, 206 .parent_names = (const char *[]){ IN_PREFIX "xtal",
207 "axg_ao_clk81" },
66 .num_parents = 2, 208 .num_parents = 2,
67 }, 209 },
68}; 210};
@@ -106,17 +248,23 @@ static const unsigned int axg_aoclk_reset[] = {
106}; 248};
107 249
108static struct clk_regmap *axg_aoclk_regmap[] = { 250static struct clk_regmap *axg_aoclk_regmap[] = {
109 [CLKID_AO_REMOTE] = &axg_aoclk_remote, 251 &axg_aoclk_remote,
110 [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master, 252 &axg_aoclk_i2c_master,
111 [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave, 253 &axg_aoclk_i2c_slave,
112 [CLKID_AO_UART1] = &axg_aoclk_uart1, 254 &axg_aoclk_uart1,
113 [CLKID_AO_UART2] = &axg_aoclk_uart2, 255 &axg_aoclk_uart2,
114 [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster, 256 &axg_aoclk_ir_blaster,
115 [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc, 257 &axg_aoclk_saradc,
116 [CLKID_AO_CLK81] = &axg_aoclk_clk81, 258 &axg_aoclk_cts_oscin,
117 [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux, 259 &axg_aoclk_32k_pre,
118 [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div, 260 &axg_aoclk_32k_div,
119 [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate, 261 &axg_aoclk_32k_sel,
262 &axg_aoclk_32k,
263 &axg_aoclk_cts_rtc_oscin,
264 &axg_aoclk_clk81,
265 &axg_aoclk_saradc_mux,
266 &axg_aoclk_saradc_div,
267 &axg_aoclk_saradc_gate,
120}; 268};
121 269
122static const struct clk_hw_onecell_data axg_aoclk_onecell_data = { 270static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
@@ -132,10 +280,22 @@ static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
132 [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw, 280 [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw,
133 [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw, 281 [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw,
134 [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw, 282 [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw,
283 [CLKID_AO_CTS_OSCIN] = &axg_aoclk_cts_oscin.hw,
284 [CLKID_AO_32K_PRE] = &axg_aoclk_32k_pre.hw,
285 [CLKID_AO_32K_DIV] = &axg_aoclk_32k_div.hw,
286 [CLKID_AO_32K_SEL] = &axg_aoclk_32k_sel.hw,
287 [CLKID_AO_32K] = &axg_aoclk_32k.hw,
288 [CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw,
135 }, 289 },
136 .num = NR_CLKS, 290 .num = NR_CLKS,
137}; 291};
138 292
293static const struct meson_aoclk_input axg_aoclk_inputs[] = {
294 { .name = "xtal", .required = true },
295 { .name = "mpeg-clk", .required = true },
296 { .name = "ext-32k-0", .required = false },
297};
298
139static const struct meson_aoclk_data axg_aoclkc_data = { 299static const struct meson_aoclk_data axg_aoclkc_data = {
140 .reset_reg = AO_RTI_GEN_CNTL_REG0, 300 .reset_reg = AO_RTI_GEN_CNTL_REG0,
141 .num_reset = ARRAY_SIZE(axg_aoclk_reset), 301 .num_reset = ARRAY_SIZE(axg_aoclk_reset),
@@ -143,6 +303,9 @@ static const struct meson_aoclk_data axg_aoclkc_data = {
143 .num_clks = ARRAY_SIZE(axg_aoclk_regmap), 303 .num_clks = ARRAY_SIZE(axg_aoclk_regmap),
144 .clks = axg_aoclk_regmap, 304 .clks = axg_aoclk_regmap,
145 .hw_data = &axg_aoclk_onecell_data, 305 .hw_data = &axg_aoclk_onecell_data,
306 .inputs = axg_aoclk_inputs,
307 .num_inputs = ARRAY_SIZE(axg_aoclk_inputs),
308 .input_prefix = IN_PREFIX,
146}; 309};
147 310
148static const struct of_device_id axg_aoclkc_match_table[] = { 311static const struct of_device_id axg_aoclkc_match_table[] = {
diff --git a/drivers/clk/meson/axg-aoclk.h b/drivers/clk/meson/axg-aoclk.h
index 91384d8dd844..3cc27e85170f 100644
--- a/drivers/clk/meson/axg-aoclk.h
+++ b/drivers/clk/meson/axg-aoclk.h
@@ -10,18 +10,7 @@
10#ifndef __AXG_AOCLKC_H 10#ifndef __AXG_AOCLKC_H
11#define __AXG_AOCLKC_H 11#define __AXG_AOCLKC_H
12 12
13#define NR_CLKS 11 13#define NR_CLKS 17
14/* AO Configuration Clock registers offsets
15 * Register offsets from the data sheet must be multiplied by 4.
16 */
17#define AO_RTI_PWR_CNTL_REG1 0x0C
18#define AO_RTI_PWR_CNTL_REG0 0x10
19#define AO_RTI_GEN_CNTL_REG0 0x40
20#define AO_OSCIN_CNTL 0x58
21#define AO_CRT_CLK_CNTL1 0x68
22#define AO_SAR_CLK 0x90
23#define AO_RTC_ALT_CLK_CNTL0 0x94
24#define AO_RTC_ALT_CLK_CNTL1 0x98
25 14
26#include <dt-bindings/clock/axg-aoclkc.h> 15#include <dt-bindings/clock/axg-aoclkc.h>
27#include <dt-bindings/reset/axg-aoclkc.h> 16#include <dt-bindings/reset/axg-aoclkc.h>
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 8ac3a2295473..7ab200b6c3bf 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -14,8 +14,11 @@
14#include <linux/reset.h> 14#include <linux/reset.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16 16
17#include "clkc-audio.h"
18#include "axg-audio.h" 17#include "axg-audio.h"
18#include "clk-input.h"
19#include "clk-regmap.h"
20#include "clk-phase.h"
21#include "sclk-div.h"
19 22
20#define AXG_MST_IN_COUNT 8 23#define AXG_MST_IN_COUNT 8
21#define AXG_SLV_SCLK_COUNT 10 24#define AXG_SLV_SCLK_COUNT 10
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 792735d7e46e..7a8ef80e5f2c 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -9,16 +9,17 @@
9 * Author: Qiufang Dai <qiufang.dai@amlogic.com> 9 * Author: Qiufang Dai <qiufang.dai@amlogic.com>
10 */ 10 */
11 11
12#include <linux/clk.h>
13#include <linux/clk-provider.h> 12#include <linux/clk-provider.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <linux/of_device.h> 14#include <linux/of_device.h>
16#include <linux/mfd/syscon.h>
17#include <linux/platform_device.h> 15#include <linux/platform_device.h>
18#include <linux/regmap.h>
19 16
20#include "clkc.h" 17#include "clk-input.h"
18#include "clk-regmap.h"
19#include "clk-pll.h"
20#include "clk-mpll.h"
21#include "axg.h" 21#include "axg.h"
22#include "meson-eeclk.h"
22 23
23static DEFINE_SPINLOCK(meson_clk_lock); 24static DEFINE_SPINLOCK(meson_clk_lock);
24 25
@@ -58,7 +59,7 @@ static struct clk_regmap axg_fixed_pll_dco = {
58 .hw.init = &(struct clk_init_data){ 59 .hw.init = &(struct clk_init_data){
59 .name = "fixed_pll_dco", 60 .name = "fixed_pll_dco",
60 .ops = &meson_clk_pll_ro_ops, 61 .ops = &meson_clk_pll_ro_ops,
61 .parent_names = (const char *[]){ "xtal" }, 62 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
62 .num_parents = 1, 63 .num_parents = 1,
63 }, 64 },
64}; 65};
@@ -113,7 +114,7 @@ static struct clk_regmap axg_sys_pll_dco = {
113 .hw.init = &(struct clk_init_data){ 114 .hw.init = &(struct clk_init_data){
114 .name = "sys_pll_dco", 115 .name = "sys_pll_dco",
115 .ops = &meson_clk_pll_ro_ops, 116 .ops = &meson_clk_pll_ro_ops,
116 .parent_names = (const char *[]){ "xtal" }, 117 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
117 .num_parents = 1, 118 .num_parents = 1,
118 }, 119 },
119}; 120};
@@ -214,7 +215,7 @@ static struct clk_regmap axg_gp0_pll_dco = {
214 .hw.init = &(struct clk_init_data){ 215 .hw.init = &(struct clk_init_data){
215 .name = "gp0_pll_dco", 216 .name = "gp0_pll_dco",
216 .ops = &meson_clk_pll_ops, 217 .ops = &meson_clk_pll_ops,
217 .parent_names = (const char *[]){ "xtal" }, 218 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
218 .num_parents = 1, 219 .num_parents = 1,
219 }, 220 },
220}; 221};
@@ -283,7 +284,7 @@ static struct clk_regmap axg_hifi_pll_dco = {
283 .hw.init = &(struct clk_init_data){ 284 .hw.init = &(struct clk_init_data){
284 .name = "hifi_pll_dco", 285 .name = "hifi_pll_dco",
285 .ops = &meson_clk_pll_ops, 286 .ops = &meson_clk_pll_ops,
286 .parent_names = (const char *[]){ "xtal" }, 287 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
287 .num_parents = 1, 288 .num_parents = 1,
288 }, 289 },
289}; 290};
@@ -701,7 +702,7 @@ static struct clk_regmap axg_pcie_pll_dco = {
701 .hw.init = &(struct clk_init_data){ 702 .hw.init = &(struct clk_init_data){
702 .name = "pcie_pll_dco", 703 .name = "pcie_pll_dco",
703 .ops = &meson_clk_pll_ops, 704 .ops = &meson_clk_pll_ops,
704 .parent_names = (const char *[]){ "xtal" }, 705 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
705 .num_parents = 1, 706 .num_parents = 1,
706 }, 707 },
707}; 708};
@@ -803,7 +804,7 @@ static struct clk_regmap axg_pcie_cml_en1 = {
803 804
804static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 }; 805static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
805static const char * const clk81_parent_names[] = { 806static const char * const clk81_parent_names[] = {
806 "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4", 807 IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
807 "fclk_div3", "fclk_div5" 808 "fclk_div3", "fclk_div5"
808}; 809};
809 810
@@ -852,7 +853,7 @@ static struct clk_regmap axg_clk81 = {
852}; 853};
853 854
854static const char * const axg_sd_emmc_clk0_parent_names[] = { 855static const char * const axg_sd_emmc_clk0_parent_names[] = {
855 "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", 856 IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
856 857
857 /* 858 /*
858 * Following these parent clocks, we should also have had mpll2, mpll3 859 * Following these parent clocks, we should also have had mpll2, mpll3
@@ -957,7 +958,7 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
957static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8, 958static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
958 9, 10, 11, 13, 14, }; 959 9, 10, 11, 13, 14, };
959static const char * const gen_clk_parent_names[] = { 960static const char * const gen_clk_parent_names[] = {
960 "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3", 961 IN_PREFIX "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
961 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll", 962 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
962}; 963};
963 964
@@ -1255,46 +1256,20 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
1255 &axg_pcie_pll_od, 1256 &axg_pcie_pll_od,
1256}; 1257};
1257 1258
1259static const struct meson_eeclkc_data axg_clkc_data = {
1260 .regmap_clks = axg_clk_regmaps,
1261 .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
1262 .hw_onecell_data = &axg_hw_onecell_data,
1263};
1264
1265
1258static const struct of_device_id clkc_match_table[] = { 1266static const struct of_device_id clkc_match_table[] = {
1259 { .compatible = "amlogic,axg-clkc" }, 1267 { .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
1260 {} 1268 {}
1261}; 1269};
1262 1270
1263static int axg_clkc_probe(struct platform_device *pdev)
1264{
1265 struct device *dev = &pdev->dev;
1266 struct regmap *map;
1267 int ret, i;
1268
1269 /* Get the hhi system controller node if available */
1270 map = syscon_node_to_regmap(of_get_parent(dev->of_node));
1271 if (IS_ERR(map)) {
1272 dev_err(dev, "failed to get HHI regmap\n");
1273 return PTR_ERR(map);
1274 }
1275
1276 /* Populate regmap for the regmap backed clocks */
1277 for (i = 0; i < ARRAY_SIZE(axg_clk_regmaps); i++)
1278 axg_clk_regmaps[i]->map = map;
1279
1280 for (i = 0; i < axg_hw_onecell_data.num; i++) {
1281 /* array might be sparse */
1282 if (!axg_hw_onecell_data.hws[i])
1283 continue;
1284
1285 ret = devm_clk_hw_register(dev, axg_hw_onecell_data.hws[i]);
1286 if (ret) {
1287 dev_err(dev, "Clock registration failed\n");
1288 return ret;
1289 }
1290 }
1291
1292 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
1293 &axg_hw_onecell_data);
1294}
1295
1296static struct platform_driver axg_driver = { 1271static struct platform_driver axg_driver = {
1297 .probe = axg_clkc_probe, 1272 .probe = meson_eeclkc_probe,
1298 .driver = { 1273 .driver = {
1299 .name = "axg-clkc", 1274 .name = "axg-clkc",
1300 .of_match_table = clkc_match_table, 1275 .of_match_table = clkc_match_table,
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
new file mode 100644
index 000000000000..c5ca23a5e3e8
--- /dev/null
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -0,0 +1,138 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2017 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 * Author: Jerome Brunet <jbrunet@baylibre.com>
6 */
7
8/*
9 * The AO Domain embeds a dual/divider to generate a more precise
10 * 32,768KHz clock for low-power suspend mode and CEC.
11 * ______ ______
12 * | | | |
13 * | Div1 |-| Cnt1 |
14 * /|______| |______|\
15 * -| ______ ______ X--> Out
16 * \| | | |/
17 * | Div2 |-| Cnt2 |
18 * |______| |______|
19 *
20 * The dividing can be switched to single or dual, with a counter
21 * for each divider to set when the switching is done.
22 */
23
24#include <linux/clk-provider.h>
25#include <linux/module.h>
26
27#include "clk-regmap.h"
28#include "clk-dualdiv.h"
29
30static inline struct meson_clk_dualdiv_data *
31meson_clk_dualdiv_data(struct clk_regmap *clk)
32{
33 return (struct meson_clk_dualdiv_data *)clk->data;
34}
35
36static unsigned long
37__dualdiv_param_to_rate(unsigned long parent_rate,
38 const struct meson_clk_dualdiv_param *p)
39{
40 if (!p->dual)
41 return DIV_ROUND_CLOSEST(parent_rate, p->n1);
42
43 return DIV_ROUND_CLOSEST(parent_rate * (p->m1 + p->m2),
44 p->n1 * p->m1 + p->n2 * p->m2);
45}
46
47static unsigned long meson_clk_dualdiv_recalc_rate(struct clk_hw *hw,
48 unsigned long parent_rate)
49{
50 struct clk_regmap *clk = to_clk_regmap(hw);
51 struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
52 struct meson_clk_dualdiv_param setting;
53
54 setting.dual = meson_parm_read(clk->map, &dualdiv->dual);
55 setting.n1 = meson_parm_read(clk->map, &dualdiv->n1) + 1;
56 setting.m1 = meson_parm_read(clk->map, &dualdiv->m1) + 1;
57 setting.n2 = meson_parm_read(clk->map, &dualdiv->n2) + 1;
58 setting.m2 = meson_parm_read(clk->map, &dualdiv->m2) + 1;
59
60 return __dualdiv_param_to_rate(parent_rate, &setting);
61}
62
63static const struct meson_clk_dualdiv_param *
64__dualdiv_get_setting(unsigned long rate, unsigned long parent_rate,
65 struct meson_clk_dualdiv_data *dualdiv)
66{
67 const struct meson_clk_dualdiv_param *table = dualdiv->table;
68 unsigned long best = 0, now = 0;
69 unsigned int i, best_i = 0;
70
71 if (!table)
72 return NULL;
73
74 for (i = 0; table[i].n1; i++) {
75 now = __dualdiv_param_to_rate(parent_rate, &table[i]);
76
77 /* If we get an exact match, don't bother any further */
78 if (now == rate) {
79 return &table[i];
80 } else if (abs(now - rate) < abs(best - rate)) {
81 best = now;
82 best_i = i;
83 }
84 }
85
86 return (struct meson_clk_dualdiv_param *)&table[best_i];
87}
88
89static long meson_clk_dualdiv_round_rate(struct clk_hw *hw, unsigned long rate,
90 unsigned long *parent_rate)
91{
92 struct clk_regmap *clk = to_clk_regmap(hw);
93 struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
94 const struct meson_clk_dualdiv_param *setting =
95 __dualdiv_get_setting(rate, *parent_rate, dualdiv);
96
97 if (!setting)
98 return meson_clk_dualdiv_recalc_rate(hw, *parent_rate);
99
100 return __dualdiv_param_to_rate(*parent_rate, setting);
101}
102
103static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate,
104 unsigned long parent_rate)
105{
106 struct clk_regmap *clk = to_clk_regmap(hw);
107 struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
108 const struct meson_clk_dualdiv_param *setting =
109 __dualdiv_get_setting(rate, parent_rate, dualdiv);
110
111 if (!setting)
112 return -EINVAL;
113
114 meson_parm_write(clk->map, &dualdiv->dual, setting->dual);
115 meson_parm_write(clk->map, &dualdiv->n1, setting->n1 - 1);
116 meson_parm_write(clk->map, &dualdiv->m1, setting->m1 - 1);
117 meson_parm_write(clk->map, &dualdiv->n2, setting->n2 - 1);
118 meson_parm_write(clk->map, &dualdiv->m2, setting->m2 - 1);
119
120 return 0;
121}
122
123const struct clk_ops meson_clk_dualdiv_ops = {
124 .recalc_rate = meson_clk_dualdiv_recalc_rate,
125 .round_rate = meson_clk_dualdiv_round_rate,
126 .set_rate = meson_clk_dualdiv_set_rate,
127};
128EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ops);
129
130const struct clk_ops meson_clk_dualdiv_ro_ops = {
131 .recalc_rate = meson_clk_dualdiv_recalc_rate,
132};
133EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops);
134
135MODULE_DESCRIPTION("Amlogic dual divider driver");
136MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
137MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
138MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-dualdiv.h b/drivers/clk/meson/clk-dualdiv.h
new file mode 100644
index 000000000000..4aa939018012
--- /dev/null
+++ b/drivers/clk/meson/clk-dualdiv.h
@@ -0,0 +1,33 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_DUALDIV_H
8#define __MESON_CLK_DUALDIV_H
9
10#include <linux/clk-provider.h>
11#include "parm.h"
12
13struct meson_clk_dualdiv_param {
14 unsigned int n1;
15 unsigned int n2;
16 unsigned int m1;
17 unsigned int m2;
18 unsigned int dual;
19};
20
21struct meson_clk_dualdiv_data {
22 struct parm n1;
23 struct parm n2;
24 struct parm m1;
25 struct parm m2;
26 struct parm dual;
27 const struct meson_clk_dualdiv_param *table;
28};
29
30extern const struct clk_ops meson_clk_dualdiv_ops;
31extern const struct clk_ops meson_clk_dualdiv_ro_ops;
32
33#endif /* __MESON_CLK_DUALDIV_H */
diff --git a/drivers/clk/meson/clk-input.c b/drivers/clk/meson/clk-input.c
index 06b3e3bb6a66..086226e9dba6 100644
--- a/drivers/clk/meson/clk-input.c
+++ b/drivers/clk/meson/clk-input.c
@@ -7,7 +7,8 @@
7#include <linux/clk.h> 7#include <linux/clk.h>
8#include <linux/clk-provider.h> 8#include <linux/clk-provider.h>
9#include <linux/device.h> 9#include <linux/device.h>
10#include "clkc.h" 10#include <linux/module.h>
11#include "clk-input.h"
11 12
12static const struct clk_ops meson_clk_no_ops = {}; 13static const struct clk_ops meson_clk_no_ops = {};
13 14
@@ -42,3 +43,7 @@ struct clk_hw *meson_clk_hw_register_input(struct device *dev,
42 return ret ? ERR_PTR(ret) : hw; 43 return ret ? ERR_PTR(ret) : hw;
43} 44}
44EXPORT_SYMBOL_GPL(meson_clk_hw_register_input); 45EXPORT_SYMBOL_GPL(meson_clk_hw_register_input);
46
47MODULE_DESCRIPTION("Amlogic clock input helper");
48MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
49MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-input.h b/drivers/clk/meson/clk-input.h
new file mode 100644
index 000000000000..4a541b9685a6
--- /dev/null
+++ b/drivers/clk/meson/clk-input.h
@@ -0,0 +1,19 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_INPUT_H
8#define __MESON_CLK_INPUT_H
9
10#include <linux/clk-provider.h>
11
12struct device;
13
14struct clk_hw *meson_clk_hw_register_input(struct device *dev,
15 const char *of_name,
16 const char *clk_name,
17 unsigned long flags);
18
19#endif /* __MESON_CLK_INPUT_H */
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 650f75cc15a9..f76850d99e59 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -12,7 +12,11 @@
12 */ 12 */
13 13
14#include <linux/clk-provider.h> 14#include <linux/clk-provider.h>
15#include "clkc.h" 15#include <linux/module.h>
16#include <linux/spinlock.h>
17
18#include "clk-regmap.h"
19#include "clk-mpll.h"
16 20
17#define SDM_DEN 16384 21#define SDM_DEN 16384
18#define N2_MIN 4 22#define N2_MIN 4
@@ -138,9 +142,15 @@ const struct clk_ops meson_clk_mpll_ro_ops = {
138 .recalc_rate = mpll_recalc_rate, 142 .recalc_rate = mpll_recalc_rate,
139 .round_rate = mpll_round_rate, 143 .round_rate = mpll_round_rate,
140}; 144};
145EXPORT_SYMBOL_GPL(meson_clk_mpll_ro_ops);
141 146
142const struct clk_ops meson_clk_mpll_ops = { 147const struct clk_ops meson_clk_mpll_ops = {
143 .recalc_rate = mpll_recalc_rate, 148 .recalc_rate = mpll_recalc_rate,
144 .round_rate = mpll_round_rate, 149 .round_rate = mpll_round_rate,
145 .set_rate = mpll_set_rate, 150 .set_rate = mpll_set_rate,
146}; 151};
152EXPORT_SYMBOL_GPL(meson_clk_mpll_ops);
153
154MODULE_DESCRIPTION("Amlogic MPLL driver");
155MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>");
156MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h
new file mode 100644
index 000000000000..cf79340006dd
--- /dev/null
+++ b/drivers/clk/meson/clk-mpll.h
@@ -0,0 +1,30 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_MPLL_H
8#define __MESON_CLK_MPLL_H
9
10#include <linux/clk-provider.h>
11#include <linux/spinlock.h>
12
13#include "parm.h"
14
15struct meson_clk_mpll_data {
16 struct parm sdm;
17 struct parm sdm_en;
18 struct parm n2;
19 struct parm ssen;
20 struct parm misc;
21 spinlock_t *lock;
22 u8 flags;
23};
24
25#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
26
27extern const struct clk_ops meson_clk_mpll_ro_ops;
28extern const struct clk_ops meson_clk_mpll_ops;
29
30#endif /* __MESON_CLK_MPLL_H */
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index cba43748ce3d..80c3ada193a4 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -5,7 +5,10 @@
5 */ 5 */
6 6
7#include <linux/clk-provider.h> 7#include <linux/clk-provider.h>
8#include "clkc.h" 8#include <linux/module.h>
9
10#include "clk-regmap.h"
11#include "clk-phase.h"
9 12
10#define phase_step(_width) (360 / (1 << (_width))) 13#define phase_step(_width) (360 / (1 << (_width)))
11 14
@@ -15,13 +18,12 @@ meson_clk_phase_data(struct clk_regmap *clk)
15 return (struct meson_clk_phase_data *)clk->data; 18 return (struct meson_clk_phase_data *)clk->data;
16} 19}
17 20
18int meson_clk_degrees_from_val(unsigned int val, unsigned int width) 21static int meson_clk_degrees_from_val(unsigned int val, unsigned int width)
19{ 22{
20 return phase_step(width) * val; 23 return phase_step(width) * val;
21} 24}
22EXPORT_SYMBOL_GPL(meson_clk_degrees_from_val);
23 25
24unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width) 26static unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
25{ 27{
26 unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width)); 28 unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width));
27 29
@@ -31,7 +33,6 @@ unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
31 */ 33 */
32 return val % (1 << width); 34 return val % (1 << width);
33} 35}
34EXPORT_SYMBOL_GPL(meson_clk_degrees_to_val);
35 36
36static int meson_clk_phase_get_phase(struct clk_hw *hw) 37static int meson_clk_phase_get_phase(struct clk_hw *hw)
37{ 38{
@@ -61,3 +62,67 @@ const struct clk_ops meson_clk_phase_ops = {
61 .set_phase = meson_clk_phase_set_phase, 62 .set_phase = meson_clk_phase_set_phase,
62}; 63};
63EXPORT_SYMBOL_GPL(meson_clk_phase_ops); 64EXPORT_SYMBOL_GPL(meson_clk_phase_ops);
65
66/*
67 * This is a special clock for the audio controller.
68 * The phase of mst_sclk clock output can be controlled independently
69 * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2).
70 * Controlling these 3 phases as just one makes things simpler and
71 * give the same clock view to all the element on the i2s bus.
72 * If necessary, we can still control the phase in the tdm block
73 * which makes these independent control redundant.
74 */
75static inline struct meson_clk_triphase_data *
76meson_clk_triphase_data(struct clk_regmap *clk)
77{
78 return (struct meson_clk_triphase_data *)clk->data;
79}
80
81static void meson_clk_triphase_sync(struct clk_hw *hw)
82{
83 struct clk_regmap *clk = to_clk_regmap(hw);
84 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
85 unsigned int val;
86
87 /* Get phase 0 and sync it to phase 1 and 2 */
88 val = meson_parm_read(clk->map, &tph->ph0);
89 meson_parm_write(clk->map, &tph->ph1, val);
90 meson_parm_write(clk->map, &tph->ph2, val);
91}
92
93static int meson_clk_triphase_get_phase(struct clk_hw *hw)
94{
95 struct clk_regmap *clk = to_clk_regmap(hw);
96 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
97 unsigned int val;
98
99 /* Phase are in sync, reading phase 0 is enough */
100 val = meson_parm_read(clk->map, &tph->ph0);
101
102 return meson_clk_degrees_from_val(val, tph->ph0.width);
103}
104
105static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
106{
107 struct clk_regmap *clk = to_clk_regmap(hw);
108 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
109 unsigned int val;
110
111 val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
112 meson_parm_write(clk->map, &tph->ph0, val);
113 meson_parm_write(clk->map, &tph->ph1, val);
114 meson_parm_write(clk->map, &tph->ph2, val);
115
116 return 0;
117}
118
119const struct clk_ops meson_clk_triphase_ops = {
120 .init = meson_clk_triphase_sync,
121 .get_phase = meson_clk_triphase_get_phase,
122 .set_phase = meson_clk_triphase_set_phase,
123};
124EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
125
126MODULE_DESCRIPTION("Amlogic phase driver");
127MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
128MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-phase.h b/drivers/clk/meson/clk-phase.h
new file mode 100644
index 000000000000..5579f9ced142
--- /dev/null
+++ b/drivers/clk/meson/clk-phase.h
@@ -0,0 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_PHASE_H
8#define __MESON_CLK_PHASE_H
9
10#include <linux/clk-provider.h>
11#include "parm.h"
12
13struct meson_clk_phase_data {
14 struct parm ph;
15};
16
17struct meson_clk_triphase_data {
18 struct parm ph0;
19 struct parm ph1;
20 struct parm ph2;
21};
22
23extern const struct clk_ops meson_clk_phase_ops;
24extern const struct clk_ops meson_clk_triphase_ops;
25
26#endif /* __MESON_CLK_PHASE_H */
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index afffc1547e20..41e16dd7272a 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -32,11 +32,10 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/math64.h> 33#include <linux/math64.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/of_address.h> 35#include <linux/rational.h>
36#include <linux/slab.h>
37#include <linux/string.h>
38 36
39#include "clkc.h" 37#include "clk-regmap.h"
38#include "clk-pll.h"
40 39
41static inline struct meson_clk_pll_data * 40static inline struct meson_clk_pll_data *
42meson_clk_pll_data(struct clk_regmap *clk) 41meson_clk_pll_data(struct clk_regmap *clk)
@@ -44,12 +43,21 @@ meson_clk_pll_data(struct clk_regmap *clk)
44 return (struct meson_clk_pll_data *)clk->data; 43 return (struct meson_clk_pll_data *)clk->data;
45} 44}
46 45
46static int __pll_round_closest_mult(struct meson_clk_pll_data *pll)
47{
48 if ((pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) &&
49 !MESON_PARM_APPLICABLE(&pll->frac))
50 return 1;
51
52 return 0;
53}
54
47static unsigned long __pll_params_to_rate(unsigned long parent_rate, 55static unsigned long __pll_params_to_rate(unsigned long parent_rate,
48 const struct pll_params_table *pllt, 56 unsigned int m, unsigned int n,
49 u16 frac, 57 unsigned int frac,
50 struct meson_clk_pll_data *pll) 58 struct meson_clk_pll_data *pll)
51{ 59{
52 u64 rate = (u64)parent_rate * pllt->m; 60 u64 rate = (u64)parent_rate * m;
53 61
54 if (frac && MESON_PARM_APPLICABLE(&pll->frac)) { 62 if (frac && MESON_PARM_APPLICABLE(&pll->frac)) {
55 u64 frac_rate = (u64)parent_rate * frac; 63 u64 frac_rate = (u64)parent_rate * frac;
@@ -58,7 +66,7 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate,
58 (1 << pll->frac.width)); 66 (1 << pll->frac.width));
59 } 67 }
60 68
61 return DIV_ROUND_UP_ULL(rate, pllt->n); 69 return DIV_ROUND_UP_ULL(rate, n);
62} 70}
63 71
64static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, 72static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
@@ -66,35 +74,39 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
66{ 74{
67 struct clk_regmap *clk = to_clk_regmap(hw); 75 struct clk_regmap *clk = to_clk_regmap(hw);
68 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); 76 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
69 struct pll_params_table pllt; 77 unsigned int m, n, frac;
70 u16 frac;
71 78
72 pllt.n = meson_parm_read(clk->map, &pll->n); 79 n = meson_parm_read(clk->map, &pll->n);
73 pllt.m = meson_parm_read(clk->map, &pll->m); 80 m = meson_parm_read(clk->map, &pll->m);
74 81
75 frac = MESON_PARM_APPLICABLE(&pll->frac) ? 82 frac = MESON_PARM_APPLICABLE(&pll->frac) ?
76 meson_parm_read(clk->map, &pll->frac) : 83 meson_parm_read(clk->map, &pll->frac) :
77 0; 84 0;
78 85
79 return __pll_params_to_rate(parent_rate, &pllt, frac, pll); 86 return __pll_params_to_rate(parent_rate, m, n, frac, pll);
80} 87}
81 88
82static u16 __pll_params_with_frac(unsigned long rate, 89static unsigned int __pll_params_with_frac(unsigned long rate,
83 unsigned long parent_rate, 90 unsigned long parent_rate,
84 const struct pll_params_table *pllt, 91 unsigned int m,
85 struct meson_clk_pll_data *pll) 92 unsigned int n,
93 struct meson_clk_pll_data *pll)
86{ 94{
87 u16 frac_max = (1 << pll->frac.width); 95 unsigned int frac_max = (1 << pll->frac.width);
88 u64 val = (u64)rate * pllt->n; 96 u64 val = (u64)rate * n;
97
98 /* Bail out if we are already over the requested rate */
99 if (rate < parent_rate * m / n)
100 return 0;
89 101
90 if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) 102 if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST)
91 val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate); 103 val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate);
92 else 104 else
93 val = div_u64(val * frac_max, parent_rate); 105 val = div_u64(val * frac_max, parent_rate);
94 106
95 val -= pllt->m * frac_max; 107 val -= m * frac_max;
96 108
97 return min((u16)val, (u16)(frac_max - 1)); 109 return min((unsigned int)val, (frac_max - 1));
98} 110}
99 111
100static bool meson_clk_pll_is_better(unsigned long rate, 112static bool meson_clk_pll_is_better(unsigned long rate,
@@ -102,45 +114,123 @@ static bool meson_clk_pll_is_better(unsigned long rate,
102 unsigned long now, 114 unsigned long now,
103 struct meson_clk_pll_data *pll) 115 struct meson_clk_pll_data *pll)
104{ 116{
105 if (!(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) || 117 if (__pll_round_closest_mult(pll)) {
106 MESON_PARM_APPLICABLE(&pll->frac)) {
107 /* Round down */
108 if (now < rate && best < now)
109 return true;
110 } else {
111 /* Round Closest */ 118 /* Round Closest */
112 if (abs(now - rate) < abs(best - rate)) 119 if (abs(now - rate) < abs(best - rate))
113 return true; 120 return true;
121 } else {
122 /* Round down */
123 if (now < rate && best < now)
124 return true;
114 } 125 }
115 126
116 return false; 127 return false;
117} 128}
118 129
119static const struct pll_params_table * 130static int meson_clk_get_pll_table_index(unsigned int index,
120meson_clk_get_pll_settings(unsigned long rate, 131 unsigned int *m,
121 unsigned long parent_rate, 132 unsigned int *n,
122 struct meson_clk_pll_data *pll) 133 struct meson_clk_pll_data *pll)
123{ 134{
124 const struct pll_params_table *table = pll->table; 135 if (!pll->table[index].n)
125 unsigned long best = 0, now = 0; 136 return -EINVAL;
126 unsigned int i, best_i = 0; 137
138 *m = pll->table[index].m;
139 *n = pll->table[index].n;
140
141 return 0;
142}
143
144static unsigned int meson_clk_get_pll_range_m(unsigned long rate,
145 unsigned long parent_rate,
146 unsigned int n,
147 struct meson_clk_pll_data *pll)
148{
149 u64 val = (u64)rate * n;
127 150
128 if (!table) 151 if (__pll_round_closest_mult(pll))
129 return NULL; 152 return DIV_ROUND_CLOSEST_ULL(val, parent_rate);
130 153
131 for (i = 0; table[i].n; i++) { 154 return div_u64(val, parent_rate);
132 now = __pll_params_to_rate(parent_rate, &table[i], 0, pll); 155}
133 156
134 /* If we get an exact match, don't bother any further */ 157static int meson_clk_get_pll_range_index(unsigned long rate,
135 if (now == rate) { 158 unsigned long parent_rate,
136 return &table[i]; 159 unsigned int index,
137 } else if (meson_clk_pll_is_better(rate, best, now, pll)) { 160 unsigned int *m,
161 unsigned int *n,
162 struct meson_clk_pll_data *pll)
163{
164 *n = index + 1;
165
166 /* Check the predivider range */
167 if (*n >= (1 << pll->n.width))
168 return -EINVAL;
169
170 if (*n == 1) {
171 /* Get the boundaries out the way */
172 if (rate <= pll->range->min * parent_rate) {
173 *m = pll->range->min;
174 return -ENODATA;
175 } else if (rate >= pll->range->max * parent_rate) {
176 *m = pll->range->max;
177 return -ENODATA;
178 }
179 }
180
181 *m = meson_clk_get_pll_range_m(rate, parent_rate, *n, pll);
182
183 /* the pre-divider gives a multiplier too big - stop */
184 if (*m >= (1 << pll->m.width))
185 return -EINVAL;
186
187 return 0;
188}
189
190static int meson_clk_get_pll_get_index(unsigned long rate,
191 unsigned long parent_rate,
192 unsigned int index,
193 unsigned int *m,
194 unsigned int *n,
195 struct meson_clk_pll_data *pll)
196{
197 if (pll->range)
198 return meson_clk_get_pll_range_index(rate, parent_rate,
199 index, m, n, pll);
200 else if (pll->table)
201 return meson_clk_get_pll_table_index(index, m, n, pll);
202
203 return -EINVAL;
204}
205
206static int meson_clk_get_pll_settings(unsigned long rate,
207 unsigned long parent_rate,
208 unsigned int *best_m,
209 unsigned int *best_n,
210 struct meson_clk_pll_data *pll)
211{
212 unsigned long best = 0, now = 0;
213 unsigned int i, m, n;
214 int ret;
215
216 for (i = 0, ret = 0; !ret; i++) {
217 ret = meson_clk_get_pll_get_index(rate, parent_rate,
218 i, &m, &n, pll);
219 if (ret == -EINVAL)
220 break;
221
222 now = __pll_params_to_rate(parent_rate, m, n, 0, pll);
223 if (meson_clk_pll_is_better(rate, best, now, pll)) {
138 best = now; 224 best = now;
139 best_i = i; 225 *best_m = m;
226 *best_n = n;
227
228 if (now == rate)
229 break;
140 } 230 }
141 } 231 }
142 232
143 return (struct pll_params_table *)&table[best_i]; 233 return best ? 0 : -EINVAL;
144} 234}
145 235
146static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, 236static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -148,15 +238,15 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
148{ 238{
149 struct clk_regmap *clk = to_clk_regmap(hw); 239 struct clk_regmap *clk = to_clk_regmap(hw);
150 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); 240 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
151 const struct pll_params_table *pllt = 241 unsigned int m, n, frac;
152 meson_clk_get_pll_settings(rate, *parent_rate, pll);
153 unsigned long round; 242 unsigned long round;
154 u16 frac; 243 int ret;
155 244
156 if (!pllt) 245 ret = meson_clk_get_pll_settings(rate, *parent_rate, &m, &n, pll);
246 if (ret)
157 return meson_clk_pll_recalc_rate(hw, *parent_rate); 247 return meson_clk_pll_recalc_rate(hw, *parent_rate);
158 248
159 round = __pll_params_to_rate(*parent_rate, pllt, 0, pll); 249 round = __pll_params_to_rate(*parent_rate, m, n, 0, pll);
160 250
161 if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round) 251 if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round)
162 return round; 252 return round;
@@ -165,9 +255,9 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
165 * The rate provided by the setting is not an exact match, let's 255 * The rate provided by the setting is not an exact match, let's
166 * try to improve the result using the fractional parameter 256 * try to improve the result using the fractional parameter
167 */ 257 */
168 frac = __pll_params_with_frac(rate, *parent_rate, pllt, pll); 258 frac = __pll_params_with_frac(rate, *parent_rate, m, n, pll);
169 259
170 return __pll_params_to_rate(*parent_rate, pllt, frac, pll); 260 return __pll_params_to_rate(*parent_rate, m, n, frac, pll);
171} 261}
172 262
173static int meson_clk_pll_wait_lock(struct clk_hw *hw) 263static int meson_clk_pll_wait_lock(struct clk_hw *hw)
@@ -254,30 +344,27 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
254{ 344{
255 struct clk_regmap *clk = to_clk_regmap(hw); 345 struct clk_regmap *clk = to_clk_regmap(hw);
256 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); 346 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
257 const struct pll_params_table *pllt; 347 unsigned int enabled, m, n, frac = 0, ret;
258 unsigned int enabled;
259 unsigned long old_rate; 348 unsigned long old_rate;
260 u16 frac = 0;
261 349
262 if (parent_rate == 0 || rate == 0) 350 if (parent_rate == 0 || rate == 0)
263 return -EINVAL; 351 return -EINVAL;
264 352
265 old_rate = rate; 353 old_rate = rate;
266 354
267 pllt = meson_clk_get_pll_settings(rate, parent_rate, pll); 355 ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
268 if (!pllt) 356 if (ret)
269 return -EINVAL; 357 return ret;
270 358
271 enabled = meson_parm_read(clk->map, &pll->en); 359 enabled = meson_parm_read(clk->map, &pll->en);
272 if (enabled) 360 if (enabled)
273 meson_clk_pll_disable(hw); 361 meson_clk_pll_disable(hw);
274 362
275 meson_parm_write(clk->map, &pll->n, pllt->n); 363 meson_parm_write(clk->map, &pll->n, n);
276 meson_parm_write(clk->map, &pll->m, pllt->m); 364 meson_parm_write(clk->map, &pll->m, m);
277
278 365
279 if (MESON_PARM_APPLICABLE(&pll->frac)) { 366 if (MESON_PARM_APPLICABLE(&pll->frac)) {
280 frac = __pll_params_with_frac(rate, parent_rate, pllt, pll); 367 frac = __pll_params_with_frac(rate, parent_rate, m, n, pll);
281 meson_parm_write(clk->map, &pll->frac, frac); 368 meson_parm_write(clk->map, &pll->frac, frac);
282 } 369 }
283 370
@@ -309,8 +396,15 @@ const struct clk_ops meson_clk_pll_ops = {
309 .enable = meson_clk_pll_enable, 396 .enable = meson_clk_pll_enable,
310 .disable = meson_clk_pll_disable 397 .disable = meson_clk_pll_disable
311}; 398};
399EXPORT_SYMBOL_GPL(meson_clk_pll_ops);
312 400
313const struct clk_ops meson_clk_pll_ro_ops = { 401const struct clk_ops meson_clk_pll_ro_ops = {
314 .recalc_rate = meson_clk_pll_recalc_rate, 402 .recalc_rate = meson_clk_pll_recalc_rate,
315 .is_enabled = meson_clk_pll_is_enabled, 403 .is_enabled = meson_clk_pll_is_enabled,
316}; 404};
405EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops);
406
407MODULE_DESCRIPTION("Amlogic PLL driver");
408MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
409MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
410MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h
new file mode 100644
index 000000000000..55af2e285b1b
--- /dev/null
+++ b/drivers/clk/meson/clk-pll.h
@@ -0,0 +1,49 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_PLL_H
8#define __MESON_CLK_PLL_H
9
10#include <linux/clk-provider.h>
11#include <linux/regmap.h>
12#include "parm.h"
13
14struct pll_params_table {
15 unsigned int m;
16 unsigned int n;
17};
18
19struct pll_mult_range {
20 unsigned int min;
21 unsigned int max;
22};
23
24#define PLL_PARAMS(_m, _n) \
25 { \
26 .m = (_m), \
27 .n = (_n), \
28 }
29
30#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
31
32struct meson_clk_pll_data {
33 struct parm en;
34 struct parm m;
35 struct parm n;
36 struct parm frac;
37 struct parm l;
38 struct parm rst;
39 const struct reg_sequence *init_regs;
40 unsigned int init_count;
41 const struct pll_params_table *table;
42 const struct pll_mult_range *range;
43 u8 flags;
44};
45
46extern const struct clk_ops meson_clk_pll_ro_ops;
47extern const struct clk_ops meson_clk_pll_ops;
48
49#endif /* __MESON_CLK_PLL_H */
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index c515f67322a3..dcd1757cc5df 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -4,6 +4,7 @@
4 * Author: Jerome Brunet <jbrunet@baylibre.com> 4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */ 5 */
6 6
7#include <linux/module.h>
7#include "clk-regmap.h" 8#include "clk-regmap.h"
8 9
9static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable) 10static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable)
@@ -180,3 +181,7 @@ const struct clk_ops clk_regmap_mux_ro_ops = {
180 .get_parent = clk_regmap_mux_get_parent, 181 .get_parent = clk_regmap_mux_get_parent,
181}; 182};
182EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops); 183EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops);
184
185MODULE_DESCRIPTION("Amlogic regmap backed clock driver");
186MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
187MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index e9c5728d40eb..1dd0abe3ba91 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -111,4 +111,24 @@ clk_get_regmap_mux_data(struct clk_regmap *clk)
111extern const struct clk_ops clk_regmap_mux_ops; 111extern const struct clk_ops clk_regmap_mux_ops;
112extern const struct clk_ops clk_regmap_mux_ro_ops; 112extern const struct clk_ops clk_regmap_mux_ro_ops;
113 113
114#define __MESON_GATE(_name, _reg, _bit, _ops) \
115struct clk_regmap _name = { \
116 .data = &(struct clk_regmap_gate_data){ \
117 .offset = (_reg), \
118 .bit_idx = (_bit), \
119 }, \
120 .hw.init = &(struct clk_init_data) { \
121 .name = #_name, \
122 .ops = _ops, \
123 .parent_names = (const char *[]){ "clk81" }, \
124 .num_parents = 1, \
125 .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
126 }, \
127}
128
129#define MESON_GATE(_name, _reg, _bit) \
130 __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ops)
131
132#define MESON_GATE_RO(_name, _reg, _bit) \
133 __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ro_ops)
114#endif /* __CLK_REGMAP_H */ 134#endif /* __CLK_REGMAP_H */
diff --git a/drivers/clk/meson/clk-triphase.c b/drivers/clk/meson/clk-triphase.c
deleted file mode 100644
index 4a59936251e5..000000000000
--- a/drivers/clk/meson/clk-triphase.c
+++ /dev/null
@@ -1,68 +0,0 @@
1// SPDX-License-Identifier: (GPL-2.0 OR MIT)
2/*
3 * Copyright (c) 2018 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#include <linux/clk-provider.h>
8#include "clkc-audio.h"
9
10/*
11 * This is a special clock for the audio controller.
12 * The phase of mst_sclk clock output can be controlled independently
13 * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2).
14 * Controlling these 3 phases as just one makes things simpler and
15 * give the same clock view to all the element on the i2s bus.
16 * If necessary, we can still control the phase in the tdm block
17 * which makes these independent control redundant.
18 */
19static inline struct meson_clk_triphase_data *
20meson_clk_triphase_data(struct clk_regmap *clk)
21{
22 return (struct meson_clk_triphase_data *)clk->data;
23}
24
25static void meson_clk_triphase_sync(struct clk_hw *hw)
26{
27 struct clk_regmap *clk = to_clk_regmap(hw);
28 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
29 unsigned int val;
30
31 /* Get phase 0 and sync it to phase 1 and 2 */
32 val = meson_parm_read(clk->map, &tph->ph0);
33 meson_parm_write(clk->map, &tph->ph1, val);
34 meson_parm_write(clk->map, &tph->ph2, val);
35}
36
37static int meson_clk_triphase_get_phase(struct clk_hw *hw)
38{
39 struct clk_regmap *clk = to_clk_regmap(hw);
40 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
41 unsigned int val;
42
43 /* Phase are in sync, reading phase 0 is enough */
44 val = meson_parm_read(clk->map, &tph->ph0);
45
46 return meson_clk_degrees_from_val(val, tph->ph0.width);
47}
48
49static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
50{
51 struct clk_regmap *clk = to_clk_regmap(hw);
52 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
53 unsigned int val;
54
55 val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
56 meson_parm_write(clk->map, &tph->ph0, val);
57 meson_parm_write(clk->map, &tph->ph1, val);
58 meson_parm_write(clk->map, &tph->ph2, val);
59
60 return 0;
61}
62
63const struct clk_ops meson_clk_triphase_ops = {
64 .init = meson_clk_triphase_sync,
65 .get_phase = meson_clk_triphase_get_phase,
66 .set_phase = meson_clk_triphase_set_phase,
67};
68EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
deleted file mode 100644
index 6183b22c4bf2..000000000000
--- a/drivers/clk/meson/clkc.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2015 Endless Mobile, Inc.
4 * Author: Carlo Caione <carlo@endlessm.com>
5 */
6
7#ifndef __CLKC_H
8#define __CLKC_H
9
10#include <linux/clk-provider.h>
11#include "clk-regmap.h"
12
13#define PMASK(width) GENMASK(width - 1, 0)
14#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
15#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
16
17#define PARM_GET(width, shift, reg) \
18 (((reg) & SETPMASK(width, shift)) >> (shift))
19#define PARM_SET(width, shift, reg, val) \
20 (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))
21
22#define MESON_PARM_APPLICABLE(p) (!!((p)->width))
23
24struct parm {
25 u16 reg_off;
26 u8 shift;
27 u8 width;
28};
29
30static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
31{
32 unsigned int val;
33
34 regmap_read(map, p->reg_off, &val);
35 return PARM_GET(p->width, p->shift, val);
36}
37
38static inline void meson_parm_write(struct regmap *map, struct parm *p,
39 unsigned int val)
40{
41 regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
42 val << p->shift);
43}
44
45
46struct pll_params_table {
47 u16 m;
48 u16 n;
49};
50
51#define PLL_PARAMS(_m, _n) \
52 { \
53 .m = (_m), \
54 .n = (_n), \
55 }
56
57#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
58
59struct meson_clk_pll_data {
60 struct parm en;
61 struct parm m;
62 struct parm n;
63 struct parm frac;
64 struct parm l;
65 struct parm rst;
66 const struct reg_sequence *init_regs;
67 unsigned int init_count;
68 const struct pll_params_table *table;
69 u8 flags;
70};
71
72#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
73
74struct meson_clk_mpll_data {
75 struct parm sdm;
76 struct parm sdm_en;
77 struct parm n2;
78 struct parm ssen;
79 struct parm misc;
80 spinlock_t *lock;
81 u8 flags;
82};
83
84#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
85
86struct meson_clk_phase_data {
87 struct parm ph;
88};
89
90int meson_clk_degrees_from_val(unsigned int val, unsigned int width);
91unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width);
92
93struct meson_vid_pll_div_data {
94 struct parm val;
95 struct parm sel;
96};
97
98#define MESON_GATE(_name, _reg, _bit) \
99struct clk_regmap _name = { \
100 .data = &(struct clk_regmap_gate_data){ \
101 .offset = (_reg), \
102 .bit_idx = (_bit), \
103 }, \
104 .hw.init = &(struct clk_init_data) { \
105 .name = #_name, \
106 .ops = &clk_regmap_gate_ops, \
107 .parent_names = (const char *[]){ "clk81" }, \
108 .num_parents = 1, \
109 .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
110 }, \
111};
112
113/* clk_ops */
114extern const struct clk_ops meson_clk_pll_ro_ops;
115extern const struct clk_ops meson_clk_pll_ops;
116extern const struct clk_ops meson_clk_cpu_ops;
117extern const struct clk_ops meson_clk_mpll_ro_ops;
118extern const struct clk_ops meson_clk_mpll_ops;
119extern const struct clk_ops meson_clk_phase_ops;
120extern const struct clk_ops meson_vid_pll_div_ro_ops;
121
122struct clk_hw *meson_clk_hw_register_input(struct device *dev,
123 const char *of_name,
124 const char *clk_name,
125 unsigned long flags);
126
127#endif /* __CLKC_H */
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
new file mode 100644
index 000000000000..1994e735396b
--- /dev/null
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -0,0 +1,454 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Amlogic Meson-AXG Clock Controller Driver
4 *
5 * Copyright (c) 2016 Baylibre SAS.
6 * Author: Michael Turquette <mturquette@baylibre.com>
7 *
8 * Copyright (c) 2019 Baylibre SAS.
9 * Author: Neil Armstrong <narmstrong@baylibre.com>
10 */
11#include <linux/clk-provider.h>
12#include <linux/platform_device.h>
13#include <linux/reset-controller.h>
14#include <linux/mfd/syscon.h>
15#include "meson-aoclk.h"
16#include "g12a-aoclk.h"
17
18#include "clk-regmap.h"
19#include "clk-dualdiv.h"
20
21#define IN_PREFIX "ao-in-"
22
23/*
24 * AO Configuration Clock registers offsets
25 * Register offsets from the data sheet must be multiplied by 4.
26 */
27#define AO_RTI_STATUS_REG3 0x0C
28#define AO_RTI_PWR_CNTL_REG0 0x10
29#define AO_RTI_GEN_CNTL_REG0 0x40
30#define AO_CLK_GATE0 0x4c
31#define AO_CLK_GATE0_SP 0x50
32#define AO_OSCIN_CNTL 0x58
33#define AO_CEC_CLK_CNTL_REG0 0x74
34#define AO_CEC_CLK_CNTL_REG1 0x78
35#define AO_SAR_CLK 0x90
36#define AO_RTC_ALT_CLK_CNTL0 0x94
37#define AO_RTC_ALT_CLK_CNTL1 0x98
38
39/*
40 * Like every other peripheral clock gate in Amlogic Clock drivers,
41 * we are using CLK_IGNORE_UNUSED here, so we keep the state of the
42 * bootloader. The goal is to remove this flag at some point.
43 * Actually removing it will require some extensive test to be done safely.
44 */
45#define AXG_AO_GATE(_name, _reg, _bit) \
46static struct clk_regmap g12a_aoclk_##_name = { \
47 .data = &(struct clk_regmap_gate_data) { \
48 .offset = (_reg), \
49 .bit_idx = (_bit), \
50 }, \
51 .hw.init = &(struct clk_init_data) { \
52 .name = "g12a_ao_" #_name, \
53 .ops = &clk_regmap_gate_ops, \
54 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
55 .num_parents = 1, \
56 .flags = CLK_IGNORE_UNUSED, \
57 }, \
58}
59
60AXG_AO_GATE(ahb, AO_CLK_GATE0, 0);
61AXG_AO_GATE(ir_in, AO_CLK_GATE0, 1);
62AXG_AO_GATE(i2c_m0, AO_CLK_GATE0, 2);
63AXG_AO_GATE(i2c_s0, AO_CLK_GATE0, 3);
64AXG_AO_GATE(uart, AO_CLK_GATE0, 4);
65AXG_AO_GATE(prod_i2c, AO_CLK_GATE0, 5);
66AXG_AO_GATE(uart2, AO_CLK_GATE0, 6);
67AXG_AO_GATE(ir_out, AO_CLK_GATE0, 7);
68AXG_AO_GATE(saradc, AO_CLK_GATE0, 8);
69AXG_AO_GATE(mailbox, AO_CLK_GATE0_SP, 0);
70AXG_AO_GATE(m3, AO_CLK_GATE0_SP, 1);
71AXG_AO_GATE(ahb_sram, AO_CLK_GATE0_SP, 2);
72AXG_AO_GATE(rti, AO_CLK_GATE0_SP, 3);
73AXG_AO_GATE(m4_fclk, AO_CLK_GATE0_SP, 4);
74AXG_AO_GATE(m4_hclk, AO_CLK_GATE0_SP, 5);
75
76static struct clk_regmap g12a_aoclk_cts_oscin = {
77 .data = &(struct clk_regmap_gate_data){
78 .offset = AO_RTI_PWR_CNTL_REG0,
79 .bit_idx = 14,
80 },
81 .hw.init = &(struct clk_init_data){
82 .name = "cts_oscin",
83 .ops = &clk_regmap_gate_ro_ops,
84 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
85 .num_parents = 1,
86 },
87};
88
89static const struct meson_clk_dualdiv_param g12a_32k_div_table[] = {
90 {
91 .dual = 1,
92 .n1 = 733,
93 .m1 = 8,
94 .n2 = 732,
95 .m2 = 11,
96 }, {}
97};
98
99/* 32k_by_oscin clock */
100
101static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = {
102 .data = &(struct clk_regmap_gate_data){
103 .offset = AO_RTC_ALT_CLK_CNTL0,
104 .bit_idx = 31,
105 },
106 .hw.init = &(struct clk_init_data){
107 .name = "g12a_ao_32k_by_oscin_pre",
108 .ops = &clk_regmap_gate_ops,
109 .parent_names = (const char *[]){ "cts_oscin" },
110 .num_parents = 1,
111 },
112};
113
114static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
115 .data = &(struct meson_clk_dualdiv_data){
116 .n1 = {
117 .reg_off = AO_RTC_ALT_CLK_CNTL0,
118 .shift = 0,
119 .width = 12,
120 },
121 .n2 = {
122 .reg_off = AO_RTC_ALT_CLK_CNTL0,
123 .shift = 12,
124 .width = 12,
125 },
126 .m1 = {
127 .reg_off = AO_RTC_ALT_CLK_CNTL1,
128 .shift = 0,
129 .width = 12,
130 },
131 .m2 = {
132 .reg_off = AO_RTC_ALT_CLK_CNTL1,
133 .shift = 12,
134 .width = 12,
135 },
136 .dual = {
137 .reg_off = AO_RTC_ALT_CLK_CNTL0,
138 .shift = 28,
139 .width = 1,
140 },
141 .table = g12a_32k_div_table,
142 },
143 .hw.init = &(struct clk_init_data){
144 .name = "g12a_ao_32k_by_oscin_div",
145 .ops = &meson_clk_dualdiv_ops,
146 .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_pre" },
147 .num_parents = 1,
148 },
149};
150
151static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
152 .data = &(struct clk_regmap_mux_data) {
153 .offset = AO_RTC_ALT_CLK_CNTL1,
154 .mask = 0x1,
155 .shift = 24,
156 .flags = CLK_MUX_ROUND_CLOSEST,
157 },
158 .hw.init = &(struct clk_init_data){
159 .name = "g12a_ao_32k_by_oscin_sel",
160 .ops = &clk_regmap_mux_ops,
161 .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_div",
162 "g12a_ao_32k_by_oscin_pre" },
163 .num_parents = 2,
164 .flags = CLK_SET_RATE_PARENT,
165 },
166};
167
168static struct clk_regmap g12a_aoclk_32k_by_oscin = {
169 .data = &(struct clk_regmap_gate_data){
170 .offset = AO_RTC_ALT_CLK_CNTL0,
171 .bit_idx = 30,
172 },
173 .hw.init = &(struct clk_init_data){
174 .name = "g12a_ao_32k_by_oscin",
175 .ops = &clk_regmap_gate_ops,
176 .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_sel" },
177 .num_parents = 1,
178 .flags = CLK_SET_RATE_PARENT,
179 },
180};
181
182/* cec clock */
183
184static struct clk_regmap g12a_aoclk_cec_pre = {
185 .data = &(struct clk_regmap_gate_data){
186 .offset = AO_CEC_CLK_CNTL_REG0,
187 .bit_idx = 31,
188 },
189 .hw.init = &(struct clk_init_data){
190 .name = "g12a_ao_cec_pre",
191 .ops = &clk_regmap_gate_ops,
192 .parent_names = (const char *[]){ "cts_oscin" },
193 .num_parents = 1,
194 },
195};
196
197static struct clk_regmap g12a_aoclk_cec_div = {
198 .data = &(struct meson_clk_dualdiv_data){
199 .n1 = {
200 .reg_off = AO_CEC_CLK_CNTL_REG0,
201 .shift = 0,
202 .width = 12,
203 },
204 .n2 = {
205 .reg_off = AO_CEC_CLK_CNTL_REG0,
206 .shift = 12,
207 .width = 12,
208 },
209 .m1 = {
210 .reg_off = AO_CEC_CLK_CNTL_REG1,
211 .shift = 0,
212 .width = 12,
213 },
214 .m2 = {
215 .reg_off = AO_CEC_CLK_CNTL_REG1,
216 .shift = 12,
217 .width = 12,
218 },
219 .dual = {
220 .reg_off = AO_CEC_CLK_CNTL_REG0,
221 .shift = 28,
222 .width = 1,
223 },
224 .table = g12a_32k_div_table,
225 },
226 .hw.init = &(struct clk_init_data){
227 .name = "g12a_ao_cec_div",
228 .ops = &meson_clk_dualdiv_ops,
229 .parent_names = (const char *[]){ "g12a_ao_cec_pre" },
230 .num_parents = 1,
231 },
232};
233
/*
 * CEC output mux (1 bit at shift 24 of AO_CEC_CLK_CNTL_REG1): selects
 * either the divided clock or the undivided pre-gate clock.
 */
234static struct clk_regmap g12a_aoclk_cec_sel = {
235 .data = &(struct clk_regmap_mux_data) {
236 .offset = AO_CEC_CLK_CNTL_REG1,
237 .mask = 0x1,
238 .shift = 24,
239 .flags = CLK_MUX_ROUND_CLOSEST,
240 },
241 .hw.init = &(struct clk_init_data){
242 .name = "g12a_ao_cec_sel",
243 .ops = &clk_regmap_mux_ops,
244 .parent_names = (const char *[]){ "g12a_ao_cec_div",
245 "g12a_ao_cec_pre" },
246 .num_parents = 2,
247 .flags = CLK_SET_RATE_PARENT,
248 },
249};
250
/* Final CEC gate: bit 30 of AO_CEC_CLK_CNTL_REG0 gates the muxed clock. */
251static struct clk_regmap g12a_aoclk_cec = {
252 .data = &(struct clk_regmap_gate_data){
253 .offset = AO_CEC_CLK_CNTL_REG0,
254 .bit_idx = 30,
255 },
256 .hw.init = &(struct clk_init_data){
257 .name = "g12a_ao_cec",
258 .ops = &clk_regmap_gate_ops,
259 .parent_names = (const char *[]){ "g12a_ao_cec_sel" },
260 .num_parents = 1,
261 .flags = CLK_SET_RATE_PARENT,
262 },
263};
264
/*
 * RTC oscillator input mux: picks the internal 32k-from-oscin clock or the
 * external 32k input.
 * NOTE(review): the parent here is IN_PREFIX "ext_32k-0" (underscore) while
 * the g12a_aoclk_inputs[] table below registers the input as "ext-32k-0"
 * (hyphen). One of the two spellings looks wrong and would leave this
 * parent unresolved -- verify against the DT binding.
 */
265static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
266 .data = &(struct clk_regmap_mux_data) {
267 .offset = AO_RTI_PWR_CNTL_REG0,
268 .mask = 0x1,
269 .shift = 10,
270 .flags = CLK_MUX_ROUND_CLOSEST,
271 },
272 .hw.init = &(struct clk_init_data){
273 .name = "g12a_ao_cts_rtc_oscin",
274 .ops = &clk_regmap_mux_ops,
275 .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin",
276 IN_PREFIX "ext_32k-0" },
277 .num_parents = 2,
278 .flags = CLK_SET_RATE_PARENT,
279 },
280};
281
/*
 * AO clk81 mux: selects between the EE mpeg-clk input and the RTC oscin
 * clock. Read-only ops (mux_ro) -- the kernel observes but never changes
 * this selection.
 */
282static struct clk_regmap g12a_aoclk_clk81 = {
283 .data = &(struct clk_regmap_mux_data) {
284 .offset = AO_RTI_PWR_CNTL_REG0,
285 .mask = 0x1,
286 .shift = 8,
287 .flags = CLK_MUX_ROUND_CLOSEST,
288 },
289 .hw.init = &(struct clk_init_data){
290 .name = "g12a_ao_clk81",
291 .ops = &clk_regmap_mux_ro_ops,
292 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
293 "g12a_ao_cts_rtc_oscin"},
294 .num_parents = 2,
295 .flags = CLK_SET_RATE_PARENT,
296 },
297};
298
/* SAR ADC clock mux (2 bits at shift 9 of AO_SAR_CLK): xtal or AO clk81. */
299static struct clk_regmap g12a_aoclk_saradc_mux = {
300 .data = &(struct clk_regmap_mux_data) {
301 .offset = AO_SAR_CLK,
302 .mask = 0x3,
303 .shift = 9,
304 },
305 .hw.init = &(struct clk_init_data){
306 .name = "g12a_ao_saradc_mux",
307 .ops = &clk_regmap_mux_ops,
308 .parent_names = (const char *[]){ IN_PREFIX "xtal",
309 "g12a_ao_clk81" },
310 .num_parents = 2,
311 },
312};
313
/* SAR ADC 8-bit divider (bits [7:0] of AO_SAR_CLK). */
314static struct clk_regmap g12a_aoclk_saradc_div = {
315 .data = &(struct clk_regmap_div_data) {
316 .offset = AO_SAR_CLK,
317 .shift = 0,
318 .width = 8,
319 },
320 .hw.init = &(struct clk_init_data){
321 .name = "g12a_ao_saradc_div",
322 .ops = &clk_regmap_divider_ops,
323 .parent_names = (const char *[]){ "g12a_ao_saradc_mux" },
324 .num_parents = 1,
325 .flags = CLK_SET_RATE_PARENT,
326 },
327};
328
/* SAR ADC output gate (bit 8 of AO_SAR_CLK). */
329static struct clk_regmap g12a_aoclk_saradc_gate = {
330 .data = &(struct clk_regmap_gate_data) {
331 .offset = AO_SAR_CLK,
332 .bit_idx = 8,
333 },
334 .hw.init = &(struct clk_init_data){
335 .name = "g12a_ao_saradc_gate",
336 .ops = &clk_regmap_gate_ops,
337 .parent_names = (const char *[]){ "g12a_ao_saradc_div" },
338 .num_parents = 1,
339 .flags = CLK_SET_RATE_PARENT,
340 },
341};
342
/*
 * Reset lines: maps each RESET_AO_* binding id to its bit position in the
 * reset register (AO_RTI_GEN_CNTL_REG0, see g12a_aoclkc_data below).
 * Note bit 21 is intentionally absent from the map.
 */
343static const unsigned int g12a_aoclk_reset[] = {
344 [RESET_AO_IR_IN] = 16,
345 [RESET_AO_UART] = 17,
346 [RESET_AO_I2C_M] = 18,
347 [RESET_AO_I2C_S] = 19,
348 [RESET_AO_SAR_ADC] = 20,
349 [RESET_AO_UART2] = 22,
350 [RESET_AO_IR_OUT] = 23,
351};
352
/*
 * All regmap-backed clocks of this controller; the common meson_aoclkc_probe
 * code attaches the device regmap to each entry before registration.
 */
353static struct clk_regmap *g12a_aoclk_regmap[] = {
354 &g12a_aoclk_ahb,
355 &g12a_aoclk_ir_in,
356 &g12a_aoclk_i2c_m0,
357 &g12a_aoclk_i2c_s0,
358 &g12a_aoclk_uart,
359 &g12a_aoclk_prod_i2c,
360 &g12a_aoclk_uart2,
361 &g12a_aoclk_ir_out,
362 &g12a_aoclk_saradc,
363 &g12a_aoclk_mailbox,
364 &g12a_aoclk_m3,
365 &g12a_aoclk_ahb_sram,
366 &g12a_aoclk_rti,
367 &g12a_aoclk_m4_fclk,
368 &g12a_aoclk_m4_hclk,
369 &g12a_aoclk_cts_oscin,
370 &g12a_aoclk_32k_by_oscin_pre,
371 &g12a_aoclk_32k_by_oscin_div,
372 &g12a_aoclk_32k_by_oscin_sel,
373 &g12a_aoclk_32k_by_oscin,
374 &g12a_aoclk_cec_pre,
375 &g12a_aoclk_cec_div,
376 &g12a_aoclk_cec_sel,
377 &g12a_aoclk_cec,
378 &g12a_aoclk_cts_rtc_oscin,
379 &g12a_aoclk_clk81,
380 &g12a_aoclk_saradc_mux,
381 &g12a_aoclk_saradc_div,
382 &g12a_aoclk_saradc_gate,
383};
384
/*
 * CLKID -> clk_hw lookup table exposed through of_clk_hw_onecell_get.
 * Indices are the CLKID_AO_* values from g12a-aoclk.h / the DT binding
 * header; .num (NR_CLKS) must stay >= the highest index + 1.
 */
385static const struct clk_hw_onecell_data g12a_aoclk_onecell_data = {
386 .hws = {
387 [CLKID_AO_AHB] = &g12a_aoclk_ahb.hw,
388 [CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw,
389 [CLKID_AO_I2C_M0] = &g12a_aoclk_i2c_m0.hw,
390 [CLKID_AO_I2C_S0] = &g12a_aoclk_i2c_s0.hw,
391 [CLKID_AO_UART] = &g12a_aoclk_uart.hw,
392 [CLKID_AO_PROD_I2C] = &g12a_aoclk_prod_i2c.hw,
393 [CLKID_AO_UART2] = &g12a_aoclk_uart2.hw,
394 [CLKID_AO_IR_OUT] = &g12a_aoclk_ir_out.hw,
395 [CLKID_AO_SAR_ADC] = &g12a_aoclk_saradc.hw,
396 [CLKID_AO_MAILBOX] = &g12a_aoclk_mailbox.hw,
397 [CLKID_AO_M3] = &g12a_aoclk_m3.hw,
398 [CLKID_AO_AHB_SRAM] = &g12a_aoclk_ahb_sram.hw,
399 [CLKID_AO_RTI] = &g12a_aoclk_rti.hw,
400 [CLKID_AO_M4_FCLK] = &g12a_aoclk_m4_fclk.hw,
401 [CLKID_AO_M4_HCLK] = &g12a_aoclk_m4_hclk.hw,
402 [CLKID_AO_CLK81] = &g12a_aoclk_clk81.hw,
403 [CLKID_AO_SAR_ADC_SEL] = &g12a_aoclk_saradc_mux.hw,
404 [CLKID_AO_SAR_ADC_DIV] = &g12a_aoclk_saradc_div.hw,
405 [CLKID_AO_SAR_ADC_CLK] = &g12a_aoclk_saradc_gate.hw,
406 [CLKID_AO_CTS_OSCIN] = &g12a_aoclk_cts_oscin.hw,
407 [CLKID_AO_32K_PRE] = &g12a_aoclk_32k_by_oscin_pre.hw,
408 [CLKID_AO_32K_DIV] = &g12a_aoclk_32k_by_oscin_div.hw,
409 [CLKID_AO_32K_SEL] = &g12a_aoclk_32k_by_oscin_sel.hw,
410 [CLKID_AO_32K] = &g12a_aoclk_32k_by_oscin.hw,
411 [CLKID_AO_CEC_PRE] = &g12a_aoclk_cec_pre.hw,
412 [CLKID_AO_CEC_DIV] = &g12a_aoclk_cec_div.hw,
413 [CLKID_AO_CEC_SEL] = &g12a_aoclk_cec_sel.hw,
414 [CLKID_AO_CEC] = &g12a_aoclk_cec.hw,
415 [CLKID_AO_CTS_RTC_OSCIN] = &g12a_aoclk_cts_rtc_oscin.hw,
416 },
417 .num = NR_CLKS,
418};
419
/*
 * Input clocks consumed from DT; registered internally with IN_PREFIX
 * prepended to the name. "ext-32k-0" is optional, the others are required.
 * NOTE(review): "ext-32k-0" (hyphen) does not match the
 * IN_PREFIX "ext_32k-0" (underscore) parent name used by
 * g12a_aoclk_cts_rtc_oscin above -- one of the two spellings looks wrong;
 * verify against the DT binding before relying on the external 32k input.
 */
420static const struct meson_aoclk_input g12a_aoclk_inputs[] = {
421 { .name = "xtal", .required = true },
422 { .name = "mpeg-clk", .required = true },
423 { .name = "ext-32k-0", .required = false },
424};
425
/*
 * Ties everything together for the shared meson_aoclkc_probe: reset
 * register/map, regmap clock list, CLKID lookup table and the DT inputs.
 */
426static const struct meson_aoclk_data g12a_aoclkc_data = {
427 .reset_reg = AO_RTI_GEN_CNTL_REG0,
428 .num_reset = ARRAY_SIZE(g12a_aoclk_reset),
429 .reset = g12a_aoclk_reset,
430 .num_clks = ARRAY_SIZE(g12a_aoclk_regmap),
431 .clks = g12a_aoclk_regmap,
432 .hw_data = &g12a_aoclk_onecell_data,
433 .inputs = g12a_aoclk_inputs,
434 .num_inputs = ARRAY_SIZE(g12a_aoclk_inputs),
435 .input_prefix = IN_PREFIX,
436};
437
/* DT match table; .data is handed to the common probe as driver data. */
438static const struct of_device_id g12a_aoclkc_match_table[] = {
439 {
440 .compatible = "amlogic,meson-g12a-aoclkc",
441 .data = &g12a_aoclkc_data,
442 },
443 { }
444};
445
/*
 * Platform driver using the shared AO-clock probe. Registered with
 * builtin_platform_driver (no module unload path, no remove callback).
 */
446static struct platform_driver g12a_aoclkc_driver = {
447 .probe = meson_aoclkc_probe,
448 .driver = {
449 .name = "g12a-aoclkc",
450 .of_match_table = g12a_aoclkc_match_table,
451 },
452};
453
454builtin_platform_driver(g12a_aoclkc_driver);
diff --git a/drivers/clk/meson/g12a-aoclk.h b/drivers/clk/meson/g12a-aoclk.h
new file mode 100644
index 000000000000..04b0d5506641
--- /dev/null
+++ b/drivers/clk/meson/g12a-aoclk.h
@@ -0,0 +1,34 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
2/*
3 * Copyright (c) 2019 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 */
6
7#ifndef __G12A_AOCLKC_H
8#define __G12A_AOCLKC_H
9
10/*
11 * CLKID index values
12 *
13 * These indices are entirely contrived and do not map onto the hardware.
14 * It has now been decided to expose everything by default in the DT header:
15 * include/dt-bindings/clock/g12a-aoclkc.h. Only the clocks ids we don't want
16 * to expose, such as the internal muxes and dividers of composite clocks,
17 * will remain defined here.
18 */
/*
 * Gaps in the id sequence (18, 23, 27, 28) are presumably taken by ids
 * exposed in the dt-bindings header included below -- confirm there.
 */
19#define CLKID_AO_SAR_ADC_SEL 16
20#define CLKID_AO_SAR_ADC_DIV 17
21#define CLKID_AO_CTS_OSCIN 19
22#define CLKID_AO_32K_PRE 20
23#define CLKID_AO_32K_DIV 21
24#define CLKID_AO_32K_SEL 22
25#define CLKID_AO_CEC_PRE 24
26#define CLKID_AO_CEC_DIV 25
27#define CLKID_AO_CEC_SEL 26
28
/* Total table size for clk_hw_onecell_data: highest id (28) + 1. */
29#define NR_CLKS 29
30
31#include <dt-bindings/clock/g12a-aoclkc.h>
32#include <dt-bindings/reset/g12a-aoclkc.h>
33
34#endif /* __G12A_AOCLKC_H */
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
new file mode 100644
index 000000000000..0e1ce8c03259
--- /dev/null
+++ b/drivers/clk/meson/g12a.c
@@ -0,0 +1,2359 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Amlogic Meson-G12A Clock Controller Driver
4 *
5 * Copyright (c) 2016 Baylibre SAS.
6 * Author: Michael Turquette <mturquette@baylibre.com>
7 *
8 * Copyright (c) 2018 Amlogic, inc.
9 * Author: Qiufang Dai <qiufang.dai@amlogic.com>
10 * Author: Jian Hu <jian.hu@amlogic.com>
11 */
12
13#include <linux/clk-provider.h>
14#include <linux/init.h>
15#include <linux/of_device.h>
16#include <linux/platform_device.h>
17
18#include "clk-input.h"
19#include "clk-mpll.h"
20#include "clk-pll.h"
21#include "clk-regmap.h"
22#include "vid-pll-div.h"
23#include "meson-eeclk.h"
24#include "g12a.h"
25
/* Protects read-modify-write sequences in the MPLL dividers below. */
26static DEFINE_SPINLOCK(meson_clk_lock);
27
/*
 * Fixed PLL DCO fed by the xtal input. Read-only ops -- firmware/bootloader
 * sets it up and the kernel only observes the m/n/frac settings in
 * HHI_FIX_PLL_CNTL0/1.
 */
28static struct clk_regmap g12a_fixed_pll_dco = {
29 .data = &(struct meson_clk_pll_data){
30 .en = {
31 .reg_off = HHI_FIX_PLL_CNTL0,
32 .shift = 28,
33 .width = 1,
34 },
35 .m = {
36 .reg_off = HHI_FIX_PLL_CNTL0,
37 .shift = 0,
38 .width = 8,
39 },
40 .n = {
41 .reg_off = HHI_FIX_PLL_CNTL0,
42 .shift = 10,
43 .width = 5,
44 },
45 .frac = {
46 .reg_off = HHI_FIX_PLL_CNTL1,
47 .shift = 0,
48 .width = 17,
49 },
50 .l = {
51 .reg_off = HHI_FIX_PLL_CNTL0,
52 .shift = 31,
53 .width = 1,
54 },
55 .rst = {
56 .reg_off = HHI_FIX_PLL_CNTL0,
57 .shift = 29,
58 .width = 1,
59 },
60 },
61 .hw.init = &(struct clk_init_data){
62 .name = "fixed_pll_dco",
63 .ops = &meson_clk_pll_ro_ops,
64 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
65 .num_parents = 1,
66 },
67};
68
/*
 * Fixed PLL output: power-of-two post-divider (OD) of the DCO, read-only.
 */
69static struct clk_regmap g12a_fixed_pll = {
70 .data = &(struct clk_regmap_div_data){
71 .offset = HHI_FIX_PLL_CNTL0,
72 .shift = 16,
73 .width = 2,
74 .flags = CLK_DIVIDER_POWER_OF_TWO,
75 },
76 .hw.init = &(struct clk_init_data){
77 .name = "fixed_pll",
78 .ops = &clk_regmap_divider_ro_ops,
79 .parent_names = (const char *[]){ "fixed_pll_dco" },
80 .num_parents = 1,
81 /*
82 * This clock won't ever change at runtime so
83 * CLK_SET_RATE_PARENT is not required
84 */
85 },
86};
87
88/*
89 * Internal sys pll emulation configuration parameters
90 */
/* Applied to HHI_SYS_PLL_CNTL1..6 when the sys PLL is (re)programmed. */
91static const struct reg_sequence g12a_sys_init_regs[] = {
92 { .reg = HHI_SYS_PLL_CNTL1, .def = 0x00000000 },
93 { .reg = HHI_SYS_PLL_CNTL2, .def = 0x00000000 },
94 { .reg = HHI_SYS_PLL_CNTL3, .def = 0x48681c00 },
95 { .reg = HHI_SYS_PLL_CNTL4, .def = 0x88770290 },
96 { .reg = HHI_SYS_PLL_CNTL5, .def = 0x39272000 },
97 { .reg = HHI_SYS_PLL_CNTL6, .def = 0x56540000 },
98};
99
/*
 * Sys PLL DCO (CPU clock source). Read-only ops: the kernel does not
 * reprogram it here; no frac field (integer-only PLL).
 */
100static struct clk_regmap g12a_sys_pll_dco = {
101 .data = &(struct meson_clk_pll_data){
102 .en = {
103 .reg_off = HHI_SYS_PLL_CNTL0,
104 .shift = 28,
105 .width = 1,
106 },
107 .m = {
108 .reg_off = HHI_SYS_PLL_CNTL0,
109 .shift = 0,
110 .width = 8,
111 },
112 .n = {
113 .reg_off = HHI_SYS_PLL_CNTL0,
114 .shift = 10,
115 .width = 5,
116 },
117 .l = {
118 .reg_off = HHI_SYS_PLL_CNTL0,
119 .shift = 31,
120 .width = 1,
121 },
122 .rst = {
123 .reg_off = HHI_SYS_PLL_CNTL0,
124 .shift = 29,
125 .width = 1,
126 },
127 .init_regs = g12a_sys_init_regs,
128 .init_count = ARRAY_SIZE(g12a_sys_init_regs),
129 },
130 .hw.init = &(struct clk_init_data){
131 .name = "sys_pll_dco",
132 .ops = &meson_clk_pll_ro_ops,
133 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
134 .num_parents = 1,
135 },
136};
137
/* Sys PLL output: read-only power-of-two post-divider of sys_pll_dco. */
138static struct clk_regmap g12a_sys_pll = {
139 .data = &(struct clk_regmap_div_data){
140 .offset = HHI_SYS_PLL_CNTL0,
141 .shift = 16,
142 .width = 3,
143 .flags = CLK_DIVIDER_POWER_OF_TWO,
144 },
145 .hw.init = &(struct clk_init_data){
146 .name = "sys_pll",
147 .ops = &clk_regmap_divider_ro_ops,
148 .parent_names = (const char *[]){ "sys_pll_dco" },
149 .num_parents = 1,
150 },
151};
152
/*
 * Allowed multiplier range for the GP0 PLL DCO (also reused by the HIFI
 * PLL below).
 */
153static const struct pll_mult_range g12a_gp0_pll_mult_range = {
154 .min = 55,
155 .max = 255,
156};
157
158/*
159 * Internal gp0 pll emulation configuration parameters
160 */
/* Applied to HHI_GP0_PLL_CNTL1..6 when the GP0 PLL is programmed. */
161static const struct reg_sequence g12a_gp0_init_regs[] = {
162 { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
163 { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
164 { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
165 { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
166 { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
167 { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
168};
169
/*
 * GP0 PLL DCO: fully programmable (meson_clk_pll_ops) fractional PLL off
 * the xtal, constrained by g12a_gp0_pll_mult_range and initialised with
 * g12a_gp0_init_regs.
 */
170static struct clk_regmap g12a_gp0_pll_dco = {
171 .data = &(struct meson_clk_pll_data){
172 .en = {
173 .reg_off = HHI_GP0_PLL_CNTL0,
174 .shift = 28,
175 .width = 1,
176 },
177 .m = {
178 .reg_off = HHI_GP0_PLL_CNTL0,
179 .shift = 0,
180 .width = 8,
181 },
182 .n = {
183 .reg_off = HHI_GP0_PLL_CNTL0,
184 .shift = 10,
185 .width = 5,
186 },
187 .frac = {
188 .reg_off = HHI_GP0_PLL_CNTL1,
189 .shift = 0,
190 .width = 17,
191 },
192 .l = {
193 .reg_off = HHI_GP0_PLL_CNTL0,
194 .shift = 31,
195 .width = 1,
196 },
197 .rst = {
198 .reg_off = HHI_GP0_PLL_CNTL0,
199 .shift = 29,
200 .width = 1,
201 },
202 .range = &g12a_gp0_pll_mult_range,
203 .init_regs = g12a_gp0_init_regs,
204 .init_count = ARRAY_SIZE(g12a_gp0_init_regs),
205 },
206 .hw.init = &(struct clk_init_data){
207 .name = "gp0_pll_dco",
208 .ops = &meson_clk_pll_ops,
209 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
210 .num_parents = 1,
211 },
212};
213
/*
 * GP0 PLL output: writable power-of-two post-divider; rate changes may
 * propagate into the DCO (CLK_SET_RATE_PARENT).
 */
214static struct clk_regmap g12a_gp0_pll = {
215 .data = &(struct clk_regmap_div_data){
216 .offset = HHI_GP0_PLL_CNTL0,
217 .shift = 16,
218 .width = 3,
219 .flags = (CLK_DIVIDER_POWER_OF_TWO |
220 CLK_DIVIDER_ROUND_CLOSEST),
221 },
222 .hw.init = &(struct clk_init_data){
223 .name = "gp0_pll",
224 .ops = &clk_regmap_divider_ops,
225 .parent_names = (const char *[]){ "gp0_pll_dco" },
226 .num_parents = 1,
227 .flags = CLK_SET_RATE_PARENT,
228 },
229};
230
231/*
232 * Internal hifi pll emulation configuration parameters
233 */
/* Applied to HHI_HIFI_PLL_CNTL1..6 when the HIFI PLL is programmed. */
234static const struct reg_sequence g12a_hifi_init_regs[] = {
235 { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
236 { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
237 { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
238 { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
239 { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
240 { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
241};
242
/*
 * HIFI PLL DCO (audio): programmable fractional PLL, closest-rate rounding.
 * Deliberately shares the GP0 multiplier range.
 */
243static struct clk_regmap g12a_hifi_pll_dco = {
244 .data = &(struct meson_clk_pll_data){
245 .en = {
246 .reg_off = HHI_HIFI_PLL_CNTL0,
247 .shift = 28,
248 .width = 1,
249 },
250 .m = {
251 .reg_off = HHI_HIFI_PLL_CNTL0,
252 .shift = 0,
253 .width = 8,
254 },
255 .n = {
256 .reg_off = HHI_HIFI_PLL_CNTL0,
257 .shift = 10,
258 .width = 5,
259 },
260 .frac = {
261 .reg_off = HHI_HIFI_PLL_CNTL1,
262 .shift = 0,
263 .width = 17,
264 },
265 .l = {
266 .reg_off = HHI_HIFI_PLL_CNTL0,
267 .shift = 31,
268 .width = 1,
269 },
270 .rst = {
271 .reg_off = HHI_HIFI_PLL_CNTL0,
272 .shift = 29,
273 .width = 1,
274 },
275 .range = &g12a_gp0_pll_mult_range,
276 .init_regs = g12a_hifi_init_regs,
277 .init_count = ARRAY_SIZE(g12a_hifi_init_regs),
278 .flags = CLK_MESON_PLL_ROUND_CLOSEST,
279 },
280 .hw.init = &(struct clk_init_data){
281 .name = "hifi_pll_dco",
282 .ops = &meson_clk_pll_ops,
283 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
284 .num_parents = 1,
285 },
286};
287
/* HIFI PLL output: writable power-of-two post-divider, closest rounding. */
288static struct clk_regmap g12a_hifi_pll = {
289 .data = &(struct clk_regmap_div_data){
290 .offset = HHI_HIFI_PLL_CNTL0,
291 .shift = 16,
292 .width = 2,
293 .flags = (CLK_DIVIDER_POWER_OF_TWO |
294 CLK_DIVIDER_ROUND_CLOSEST),
295 },
296 .hw.init = &(struct clk_init_data){
297 .name = "hifi_pll",
298 .ops = &clk_regmap_divider_ops,
299 .parent_names = (const char *[]){ "hifi_pll_dco" },
300 .num_parents = 1,
301 .flags = CLK_SET_RATE_PARENT,
302 },
303};
304
/*
 * HDMI PLL DCO: read-only from this driver (the display driver programs
 * the registers directly), hence the NOCACHE flag explained inline. Note
 * the lock bit is at shift 30 here, unlike the other PLLs (shift 31).
 */
305static struct clk_regmap g12a_hdmi_pll_dco = {
306 .data = &(struct meson_clk_pll_data){
307 .en = {
308 .reg_off = HHI_HDMI_PLL_CNTL0,
309 .shift = 28,
310 .width = 1,
311 },
312 .m = {
313 .reg_off = HHI_HDMI_PLL_CNTL0,
314 .shift = 0,
315 .width = 8,
316 },
317 .n = {
318 .reg_off = HHI_HDMI_PLL_CNTL0,
319 .shift = 10,
320 .width = 5,
321 },
322 .frac = {
323 .reg_off = HHI_HDMI_PLL_CNTL1,
324 .shift = 0,
325 .width = 16,
326 },
327 .l = {
328 .reg_off = HHI_HDMI_PLL_CNTL0,
329 .shift = 30,
330 .width = 1,
331 },
332 .rst = {
333 .reg_off = HHI_HDMI_PLL_CNTL0,
334 .shift = 29,
335 .width = 1,
336 },
337 },
338 .hw.init = &(struct clk_init_data){
339 .name = "hdmi_pll_dco",
340 .ops = &meson_clk_pll_ro_ops,
341 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
342 .num_parents = 1,
343 /*
344 * Display directly handle hdmi pll registers ATM, we need
345 * NOCACHE to keep our view of the clock as accurate as possible
346 */
347 .flags = CLK_GET_RATE_NOCACHE,
348 },
349};
350
/*
 * Three cascaded read-only power-of-two post-dividers (OD, OD2, OD3) of
 * the HDMI PLL DCO, all in HHI_HDMI_PLL_CNTL0 at shifts 16/18/20. All
 * carry CLK_GET_RATE_NOCACHE for the same reason as the DCO above.
 */
351static struct clk_regmap g12a_hdmi_pll_od = {
352 .data = &(struct clk_regmap_div_data){
353 .offset = HHI_HDMI_PLL_CNTL0,
354 .shift = 16,
355 .width = 2,
356 .flags = CLK_DIVIDER_POWER_OF_TWO,
357 },
358 .hw.init = &(struct clk_init_data){
359 .name = "hdmi_pll_od",
360 .ops = &clk_regmap_divider_ro_ops,
361 .parent_names = (const char *[]){ "hdmi_pll_dco" },
362 .num_parents = 1,
363 .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
364 },
365};
366
367static struct clk_regmap g12a_hdmi_pll_od2 = {
368 .data = &(struct clk_regmap_div_data){
369 .offset = HHI_HDMI_PLL_CNTL0,
370 .shift = 18,
371 .width = 2,
372 .flags = CLK_DIVIDER_POWER_OF_TWO,
373 },
374 .hw.init = &(struct clk_init_data){
375 .name = "hdmi_pll_od2",
376 .ops = &clk_regmap_divider_ro_ops,
377 .parent_names = (const char *[]){ "hdmi_pll_od" },
378 .num_parents = 1,
379 .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
380 },
381};
382
/* Final stage: "hdmi_pll" is the clock consumers see. */
383static struct clk_regmap g12a_hdmi_pll = {
384 .data = &(struct clk_regmap_div_data){
385 .offset = HHI_HDMI_PLL_CNTL0,
386 .shift = 20,
387 .width = 2,
388 .flags = CLK_DIVIDER_POWER_OF_TWO,
389 },
390 .hw.init = &(struct clk_init_data){
391 .name = "hdmi_pll",
392 .ops = &clk_regmap_divider_ro_ops,
393 .parent_names = (const char *[]){ "hdmi_pll_od2" },
394 .num_parents = 1,
395 .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
396 },
397};
398
/*
 * Fixed fclk dividers off the fixed PLL. Each divider is a fixed-factor
 * clock paired with its enable gate in HHI_FIX_PLL_CNTL1. Note div2p5
 * divides the *DCO* by 5 (which equals fixed_pll / 2.5 when the fixed_pll
 * post-divider is /2 -- hence the name; TODO confirm on hardware).
 */
399static struct clk_fixed_factor g12a_fclk_div2_div = {
400 .mult = 1,
401 .div = 2,
402 .hw.init = &(struct clk_init_data){
403 .name = "fclk_div2_div",
404 .ops = &clk_fixed_factor_ops,
405 .parent_names = (const char *[]){ "fixed_pll" },
406 .num_parents = 1,
407 },
408};
409
/* fclk_div2 gate: HHI_FIX_PLL_CNTL1 bit 24. */
410static struct clk_regmap g12a_fclk_div2 = {
411 .data = &(struct clk_regmap_gate_data){
412 .offset = HHI_FIX_PLL_CNTL1,
413 .bit_idx = 24,
414 },
415 .hw.init = &(struct clk_init_data){
416 .name = "fclk_div2",
417 .ops = &clk_regmap_gate_ops,
418 .parent_names = (const char *[]){ "fclk_div2_div" },
419 .num_parents = 1,
420 },
421};
422
423static struct clk_fixed_factor g12a_fclk_div3_div = {
424 .mult = 1,
425 .div = 3,
426 .hw.init = &(struct clk_init_data){
427 .name = "fclk_div3_div",
428 .ops = &clk_fixed_factor_ops,
429 .parent_names = (const char *[]){ "fixed_pll" },
430 .num_parents = 1,
431 },
432};
433
/* fclk_div3 gate: HHI_FIX_PLL_CNTL1 bit 20. */
434static struct clk_regmap g12a_fclk_div3 = {
435 .data = &(struct clk_regmap_gate_data){
436 .offset = HHI_FIX_PLL_CNTL1,
437 .bit_idx = 20,
438 },
439 .hw.init = &(struct clk_init_data){
440 .name = "fclk_div3",
441 .ops = &clk_regmap_gate_ops,
442 .parent_names = (const char *[]){ "fclk_div3_div" },
443 .num_parents = 1,
444 },
445};
446
447static struct clk_fixed_factor g12a_fclk_div4_div = {
448 .mult = 1,
449 .div = 4,
450 .hw.init = &(struct clk_init_data){
451 .name = "fclk_div4_div",
452 .ops = &clk_fixed_factor_ops,
453 .parent_names = (const char *[]){ "fixed_pll" },
454 .num_parents = 1,
455 },
456};
457
/* fclk_div4 gate: HHI_FIX_PLL_CNTL1 bit 21. */
458static struct clk_regmap g12a_fclk_div4 = {
459 .data = &(struct clk_regmap_gate_data){
460 .offset = HHI_FIX_PLL_CNTL1,
461 .bit_idx = 21,
462 },
463 .hw.init = &(struct clk_init_data){
464 .name = "fclk_div4",
465 .ops = &clk_regmap_gate_ops,
466 .parent_names = (const char *[]){ "fclk_div4_div" },
467 .num_parents = 1,
468 },
469};
470
471static struct clk_fixed_factor g12a_fclk_div5_div = {
472 .mult = 1,
473 .div = 5,
474 .hw.init = &(struct clk_init_data){
475 .name = "fclk_div5_div",
476 .ops = &clk_fixed_factor_ops,
477 .parent_names = (const char *[]){ "fixed_pll" },
478 .num_parents = 1,
479 },
480};
481
/* fclk_div5 gate: HHI_FIX_PLL_CNTL1 bit 22. */
482static struct clk_regmap g12a_fclk_div5 = {
483 .data = &(struct clk_regmap_gate_data){
484 .offset = HHI_FIX_PLL_CNTL1,
485 .bit_idx = 22,
486 },
487 .hw.init = &(struct clk_init_data){
488 .name = "fclk_div5",
489 .ops = &clk_regmap_gate_ops,
490 .parent_names = (const char *[]){ "fclk_div5_div" },
491 .num_parents = 1,
492 },
493};
494
495static struct clk_fixed_factor g12a_fclk_div7_div = {
496 .mult = 1,
497 .div = 7,
498 .hw.init = &(struct clk_init_data){
499 .name = "fclk_div7_div",
500 .ops = &clk_fixed_factor_ops,
501 .parent_names = (const char *[]){ "fixed_pll" },
502 .num_parents = 1,
503 },
504};
505
/* fclk_div7 gate: HHI_FIX_PLL_CNTL1 bit 23. */
506static struct clk_regmap g12a_fclk_div7 = {
507 .data = &(struct clk_regmap_gate_data){
508 .offset = HHI_FIX_PLL_CNTL1,
509 .bit_idx = 23,
510 },
511 .hw.init = &(struct clk_init_data){
512 .name = "fclk_div7",
513 .ops = &clk_regmap_gate_ops,
514 .parent_names = (const char *[]){ "fclk_div7_div" },
515 .num_parents = 1,
516 },
517};
518
/* Parent is fixed_pll_dco (pre-post-divider), not fixed_pll. */
519static struct clk_fixed_factor g12a_fclk_div2p5_div = {
520 .mult = 1,
521 .div = 5,
522 .hw.init = &(struct clk_init_data){
523 .name = "fclk_div2p5_div",
524 .ops = &clk_fixed_factor_ops,
525 .parent_names = (const char *[]){ "fixed_pll_dco" },
526 .num_parents = 1,
527 },
528};
529
/* fclk_div2p5 gate: HHI_FIX_PLL_CNTL1 bit 25. */
530static struct clk_regmap g12a_fclk_div2p5 = {
531 .data = &(struct clk_regmap_gate_data){
532 .offset = HHI_FIX_PLL_CNTL1,
533 .bit_idx = 25,
534 },
535 .hw.init = &(struct clk_init_data){
536 .name = "fclk_div2p5",
537 .ops = &clk_regmap_gate_ops,
538 .parent_names = (const char *[]){ "fclk_div2p5_div" },
539 .num_parents = 1,
540 },
541};
542
/*
 * 50MHz helper clock: fixed_pll_dco / 80, selectable against the xtal via
 * a read-only 1-bit mux in HHI_FIX_PLL_CNTL3.
 */
543static struct clk_fixed_factor g12a_mpll_50m_div = {
544 .mult = 1,
545 .div = 80,
546 .hw.init = &(struct clk_init_data){
547 .name = "mpll_50m_div",
548 .ops = &clk_fixed_factor_ops,
549 .parent_names = (const char *[]){ "fixed_pll_dco" },
550 .num_parents = 1,
551 },
552};
553
554static struct clk_regmap g12a_mpll_50m = {
555 .data = &(struct clk_regmap_mux_data){
556 .offset = HHI_FIX_PLL_CNTL3,
557 .mask = 0x1,
558 .shift = 5,
559 },
560 .hw.init = &(struct clk_init_data){
561 .name = "mpll_50m",
562 .ops = &clk_regmap_mux_ro_ops,
563 .parent_names = (const char *[]){ IN_PREFIX "xtal",
564 "mpll_50m_div" },
565 .num_parents = 2,
566 },
567};
568
/* Common /2 pre-divider feeding all four MPLLs below. */
569static struct clk_fixed_factor g12a_mpll_prediv = {
570 .mult = 1,
571 .div = 2,
572 .hw.init = &(struct clk_init_data){
573 .name = "mpll_prediv",
574 .ops = &clk_fixed_factor_ops,
575 .parent_names = (const char *[]){ "fixed_pll_dco" },
576 .num_parents = 1,
577 },
578};
579
/*
 * MPLL0..3: four identical fractional (sigma-delta, sdm/n2) dividers off
 * mpll_prediv, each paired with a gate at bit 31 of its control register
 * (CNTL1/3/5/7). The shared meson_clk_lock serialises the multi-field
 * register updates.
 */
580static struct clk_regmap g12a_mpll0_div = {
581 .data = &(struct meson_clk_mpll_data){
582 .sdm = {
583 .reg_off = HHI_MPLL_CNTL1,
584 .shift = 0,
585 .width = 14,
586 },
587 .sdm_en = {
588 .reg_off = HHI_MPLL_CNTL1,
589 .shift = 30,
590 .width = 1,
591 },
592 .n2 = {
593 .reg_off = HHI_MPLL_CNTL1,
594 .shift = 20,
595 .width = 9,
596 },
597 .ssen = {
598 .reg_off = HHI_MPLL_CNTL1,
599 .shift = 29,
600 .width = 1,
601 },
602 .lock = &meson_clk_lock,
603 },
604 .hw.init = &(struct clk_init_data){
605 .name = "mpll0_div",
606 .ops = &meson_clk_mpll_ops,
607 .parent_names = (const char *[]){ "mpll_prediv" },
608 .num_parents = 1,
609 },
610};
611
612static struct clk_regmap g12a_mpll0 = {
613 .data = &(struct clk_regmap_gate_data){
614 .offset = HHI_MPLL_CNTL1,
615 .bit_idx = 31,
616 },
617 .hw.init = &(struct clk_init_data){
618 .name = "mpll0",
619 .ops = &clk_regmap_gate_ops,
620 .parent_names = (const char *[]){ "mpll0_div" },
621 .num_parents = 1,
622 .flags = CLK_SET_RATE_PARENT,
623 },
624};
625
626static struct clk_regmap g12a_mpll1_div = {
627 .data = &(struct meson_clk_mpll_data){
628 .sdm = {
629 .reg_off = HHI_MPLL_CNTL3,
630 .shift = 0,
631 .width = 14,
632 },
633 .sdm_en = {
634 .reg_off = HHI_MPLL_CNTL3,
635 .shift = 30,
636 .width = 1,
637 },
638 .n2 = {
639 .reg_off = HHI_MPLL_CNTL3,
640 .shift = 20,
641 .width = 9,
642 },
643 .ssen = {
644 .reg_off = HHI_MPLL_CNTL3,
645 .shift = 29,
646 .width = 1,
647 },
648 .lock = &meson_clk_lock,
649 },
650 .hw.init = &(struct clk_init_data){
651 .name = "mpll1_div",
652 .ops = &meson_clk_mpll_ops,
653 .parent_names = (const char *[]){ "mpll_prediv" },
654 .num_parents = 1,
655 },
656};
657
658static struct clk_regmap g12a_mpll1 = {
659 .data = &(struct clk_regmap_gate_data){
660 .offset = HHI_MPLL_CNTL3,
661 .bit_idx = 31,
662 },
663 .hw.init = &(struct clk_init_data){
664 .name = "mpll1",
665 .ops = &clk_regmap_gate_ops,
666 .parent_names = (const char *[]){ "mpll1_div" },
667 .num_parents = 1,
668 .flags = CLK_SET_RATE_PARENT,
669 },
670};
671
672static struct clk_regmap g12a_mpll2_div = {
673 .data = &(struct meson_clk_mpll_data){
674 .sdm = {
675 .reg_off = HHI_MPLL_CNTL5,
676 .shift = 0,
677 .width = 14,
678 },
679 .sdm_en = {
680 .reg_off = HHI_MPLL_CNTL5,
681 .shift = 30,
682 .width = 1,
683 },
684 .n2 = {
685 .reg_off = HHI_MPLL_CNTL5,
686 .shift = 20,
687 .width = 9,
688 },
689 .ssen = {
690 .reg_off = HHI_MPLL_CNTL5,
691 .shift = 29,
692 .width = 1,
693 },
694 .lock = &meson_clk_lock,
695 },
696 .hw.init = &(struct clk_init_data){
697 .name = "mpll2_div",
698 .ops = &meson_clk_mpll_ops,
699 .parent_names = (const char *[]){ "mpll_prediv" },
700 .num_parents = 1,
701 },
702};
703
704static struct clk_regmap g12a_mpll2 = {
705 .data = &(struct clk_regmap_gate_data){
706 .offset = HHI_MPLL_CNTL5,
707 .bit_idx = 31,
708 },
709 .hw.init = &(struct clk_init_data){
710 .name = "mpll2",
711 .ops = &clk_regmap_gate_ops,
712 .parent_names = (const char *[]){ "mpll2_div" },
713 .num_parents = 1,
714 .flags = CLK_SET_RATE_PARENT,
715 },
716};
717
718static struct clk_regmap g12a_mpll3_div = {
719 .data = &(struct meson_clk_mpll_data){
720 .sdm = {
721 .reg_off = HHI_MPLL_CNTL7,
722 .shift = 0,
723 .width = 14,
724 },
725 .sdm_en = {
726 .reg_off = HHI_MPLL_CNTL7,
727 .shift = 30,
728 .width = 1,
729 },
730 .n2 = {
731 .reg_off = HHI_MPLL_CNTL7,
732 .shift = 20,
733 .width = 9,
734 },
735 .ssen = {
736 .reg_off = HHI_MPLL_CNTL7,
737 .shift = 29,
738 .width = 1,
739 },
740 .lock = &meson_clk_lock,
741 },
742 .hw.init = &(struct clk_init_data){
743 .name = "mpll3_div",
744 .ops = &meson_clk_mpll_ops,
745 .parent_names = (const char *[]){ "mpll_prediv" },
746 .num_parents = 1,
747 },
748};
749
750static struct clk_regmap g12a_mpll3 = {
751 .data = &(struct clk_regmap_gate_data){
752 .offset = HHI_MPLL_CNTL7,
753 .bit_idx = 31,
754 },
755 .hw.init = &(struct clk_init_data){
756 .name = "mpll3",
757 .ops = &clk_regmap_gate_ops,
758 .parent_names = (const char *[]){ "mpll3_div" },
759 .num_parents = 1,
760 .flags = CLK_SET_RATE_PARENT,
761 },
762};
763
/*
 * clk81 (EE peripheral clock): read-only mux (mux register value 1 is
 * skipped, see mux_table_clk81) -> 7-bit divider -> gate. The gate is
 * CLK_IS_CRITICAL: clk81 feeds the peripherals and must never be disabled.
 */
764static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
765static const char * const clk81_parent_names[] = {
766 IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
767 "fclk_div3", "fclk_div5"
768};
769
770static struct clk_regmap g12a_mpeg_clk_sel = {
771 .data = &(struct clk_regmap_mux_data){
772 .offset = HHI_MPEG_CLK_CNTL,
773 .mask = 0x7,
774 .shift = 12,
775 .table = mux_table_clk81,
776 },
777 .hw.init = &(struct clk_init_data){
778 .name = "mpeg_clk_sel",
779 .ops = &clk_regmap_mux_ro_ops,
780 .parent_names = clk81_parent_names,
781 .num_parents = ARRAY_SIZE(clk81_parent_names),
782 },
783};
784
785static struct clk_regmap g12a_mpeg_clk_div = {
786 .data = &(struct clk_regmap_div_data){
787 .offset = HHI_MPEG_CLK_CNTL,
788 .shift = 0,
789 .width = 7,
790 },
791 .hw.init = &(struct clk_init_data){
792 .name = "mpeg_clk_div",
793 .ops = &clk_regmap_divider_ops,
794 .parent_names = (const char *[]){ "mpeg_clk_sel" },
795 .num_parents = 1,
796 .flags = CLK_SET_RATE_PARENT,
797 },
798};
799
800static struct clk_regmap g12a_clk81 = {
801 .data = &(struct clk_regmap_gate_data){
802 .offset = HHI_MPEG_CLK_CNTL,
803 .bit_idx = 7,
804 },
805 .hw.init = &(struct clk_init_data){
806 .name = "clk81",
807 .ops = &clk_regmap_gate_ops,
808 .parent_names = (const char *[]){ "mpeg_clk_div" },
809 .num_parents = 1,
810 .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
811 },
812};
813
/*
 * SDIO (a) / SDcard (b) / eMMC-NAND (c) clock trees: each is a 3-bit mux,
 * a 7-bit divider and a gate. A and B live in HHI_SD_EMMC_CLK_CNTL,
 * C in HHI_NAND_CLK_CNTL.
 */
814static const char * const g12a_sd_emmc_clk0_parent_names[] = {
815 IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
816
817 /*
818 * Following these parent clocks, we should also have had mpll2, mpll3
819 * and gp0_pll but these clocks are too precious to be used here. All
820 * the necessary rates for MMC and NAND operation can be acheived using
821 * g12a_ee_core or fclk_div clocks
822 */
823};
824
825/* SDIO clock */
826static struct clk_regmap g12a_sd_emmc_a_clk0_sel = {
827 .data = &(struct clk_regmap_mux_data){
828 .offset = HHI_SD_EMMC_CLK_CNTL,
829 .mask = 0x7,
830 .shift = 9,
831 },
832 .hw.init = &(struct clk_init_data) {
833 .name = "sd_emmc_a_clk0_sel",
834 .ops = &clk_regmap_mux_ops,
835 .parent_names = g12a_sd_emmc_clk0_parent_names,
836 .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
837 .flags = CLK_SET_RATE_PARENT,
838 },
839};
840
841static struct clk_regmap g12a_sd_emmc_a_clk0_div = {
842 .data = &(struct clk_regmap_div_data){
843 .offset = HHI_SD_EMMC_CLK_CNTL,
844 .shift = 0,
845 .width = 7,
846 },
847 .hw.init = &(struct clk_init_data) {
848 .name = "sd_emmc_a_clk0_div",
849 .ops = &clk_regmap_divider_ops,
850 .parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" },
851 .num_parents = 1,
852 .flags = CLK_SET_RATE_PARENT,
853 },
854};
855
856static struct clk_regmap g12a_sd_emmc_a_clk0 = {
857 .data = &(struct clk_regmap_gate_data){
858 .offset = HHI_SD_EMMC_CLK_CNTL,
859 .bit_idx = 7,
860 },
861 .hw.init = &(struct clk_init_data){
862 .name = "sd_emmc_a_clk0",
863 .ops = &clk_regmap_gate_ops,
864 .parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
865 .num_parents = 1,
866 .flags = CLK_SET_RATE_PARENT,
867 },
868};
869
870/* SDcard clock */
871static struct clk_regmap g12a_sd_emmc_b_clk0_sel = {
872 .data = &(struct clk_regmap_mux_data){
873 .offset = HHI_SD_EMMC_CLK_CNTL,
874 .mask = 0x7,
875 .shift = 25,
876 },
877 .hw.init = &(struct clk_init_data) {
878 .name = "sd_emmc_b_clk0_sel",
879 .ops = &clk_regmap_mux_ops,
880 .parent_names = g12a_sd_emmc_clk0_parent_names,
881 .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
882 .flags = CLK_SET_RATE_PARENT,
883 },
884};
885
886static struct clk_regmap g12a_sd_emmc_b_clk0_div = {
887 .data = &(struct clk_regmap_div_data){
888 .offset = HHI_SD_EMMC_CLK_CNTL,
889 .shift = 16,
890 .width = 7,
891 },
892 .hw.init = &(struct clk_init_data) {
893 .name = "sd_emmc_b_clk0_div",
894 .ops = &clk_regmap_divider_ops,
895 .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
896 .num_parents = 1,
897 .flags = CLK_SET_RATE_PARENT,
898 },
899};
900
901static struct clk_regmap g12a_sd_emmc_b_clk0 = {
902 .data = &(struct clk_regmap_gate_data){
903 .offset = HHI_SD_EMMC_CLK_CNTL,
904 .bit_idx = 23,
905 },
906 .hw.init = &(struct clk_init_data){
907 .name = "sd_emmc_b_clk0",
908 .ops = &clk_regmap_gate_ops,
909 .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
910 .num_parents = 1,
911 .flags = CLK_SET_RATE_PARENT,
912 },
913};
914
915/* EMMC/NAND clock */
916static struct clk_regmap g12a_sd_emmc_c_clk0_sel = {
917 .data = &(struct clk_regmap_mux_data){
918 .offset = HHI_NAND_CLK_CNTL,
919 .mask = 0x7,
920 .shift = 9,
921 },
922 .hw.init = &(struct clk_init_data) {
923 .name = "sd_emmc_c_clk0_sel",
924 .ops = &clk_regmap_mux_ops,
925 .parent_names = g12a_sd_emmc_clk0_parent_names,
926 .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
927 .flags = CLK_SET_RATE_PARENT,
928 },
929};
930
931static struct clk_regmap g12a_sd_emmc_c_clk0_div = {
932 .data = &(struct clk_regmap_div_data){
933 .offset = HHI_NAND_CLK_CNTL,
934 .shift = 0,
935 .width = 7,
936 },
937 .hw.init = &(struct clk_init_data) {
938 .name = "sd_emmc_c_clk0_div",
939 .ops = &clk_regmap_divider_ops,
940 .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
941 .num_parents = 1,
942 .flags = CLK_SET_RATE_PARENT,
943 },
944};
945
946static struct clk_regmap g12a_sd_emmc_c_clk0 = {
947 .data = &(struct clk_regmap_gate_data){
948 .offset = HHI_NAND_CLK_CNTL,
949 .bit_idx = 7,
950 },
951 .hw.init = &(struct clk_init_data){
952 .name = "sd_emmc_c_clk0",
953 .ops = &clk_regmap_gate_ops,
954 .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
955 .num_parents = 1,
956 .flags = CLK_SET_RATE_PARENT,
957 },
958};
959
960/* VPU Clock */
961
962static const char * const g12a_vpu_parent_names[] = {
963 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
964 "mpll1", "vid_pll", "hifi_pll", "gp0_pll",
965};
966
967static struct clk_regmap g12a_vpu_0_sel = {
968 .data = &(struct clk_regmap_mux_data){
969 .offset = HHI_VPU_CLK_CNTL,
970 .mask = 0x3,
971 .shift = 9,
972 },
973 .hw.init = &(struct clk_init_data){
974 .name = "vpu_0_sel",
975 .ops = &clk_regmap_mux_ops,
976 .parent_names = g12a_vpu_parent_names,
977 .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
978 .flags = CLK_SET_RATE_NO_REPARENT,
979 },
980};
981
982static struct clk_regmap g12a_vpu_0_div = {
983 .data = &(struct clk_regmap_div_data){
984 .offset = HHI_VPU_CLK_CNTL,
985 .shift = 0,
986 .width = 7,
987 },
988 .hw.init = &(struct clk_init_data){
989 .name = "vpu_0_div",
990 .ops = &clk_regmap_divider_ops,
991 .parent_names = (const char *[]){ "vpu_0_sel" },
992 .num_parents = 1,
993 .flags = CLK_SET_RATE_PARENT,
994 },
995};
996
997static struct clk_regmap g12a_vpu_0 = {
998 .data = &(struct clk_regmap_gate_data){
999 .offset = HHI_VPU_CLK_CNTL,
1000 .bit_idx = 8,
1001 },
1002 .hw.init = &(struct clk_init_data) {
1003 .name = "vpu_0",
1004 .ops = &clk_regmap_gate_ops,
1005 .parent_names = (const char *[]){ "vpu_0_div" },
1006 .num_parents = 1,
1007 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1008 },
1009};
1010
1011static struct clk_regmap g12a_vpu_1_sel = {
1012 .data = &(struct clk_regmap_mux_data){
1013 .offset = HHI_VPU_CLK_CNTL,
1014 .mask = 0x3,
1015 .shift = 25,
1016 },
1017 .hw.init = &(struct clk_init_data){
1018 .name = "vpu_1_sel",
1019 .ops = &clk_regmap_mux_ops,
1020 .parent_names = g12a_vpu_parent_names,
1021 .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
1022 .flags = CLK_SET_RATE_NO_REPARENT,
1023 },
1024};
1025
1026static struct clk_regmap g12a_vpu_1_div = {
1027 .data = &(struct clk_regmap_div_data){
1028 .offset = HHI_VPU_CLK_CNTL,
1029 .shift = 16,
1030 .width = 7,
1031 },
1032 .hw.init = &(struct clk_init_data){
1033 .name = "vpu_1_div",
1034 .ops = &clk_regmap_divider_ops,
1035 .parent_names = (const char *[]){ "vpu_1_sel" },
1036 .num_parents = 1,
1037 .flags = CLK_SET_RATE_PARENT,
1038 },
1039};
1040
1041static struct clk_regmap g12a_vpu_1 = {
1042 .data = &(struct clk_regmap_gate_data){
1043 .offset = HHI_VPU_CLK_CNTL,
1044 .bit_idx = 24,
1045 },
1046 .hw.init = &(struct clk_init_data) {
1047 .name = "vpu_1",
1048 .ops = &clk_regmap_gate_ops,
1049 .parent_names = (const char *[]){ "vpu_1_div" },
1050 .num_parents = 1,
1051 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1052 },
1053};
1054
1055static struct clk_regmap g12a_vpu = {
1056 .data = &(struct clk_regmap_mux_data){
1057 .offset = HHI_VPU_CLK_CNTL,
1058 .mask = 1,
1059 .shift = 31,
1060 },
1061 .hw.init = &(struct clk_init_data){
1062 .name = "vpu",
1063 .ops = &clk_regmap_mux_ops,
1064 /*
1065 * bit 31 selects from 2 possible parents:
1066 * vpu_0 or vpu_1
1067 */
1068 .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
1069 .num_parents = 2,
1070 .flags = CLK_SET_RATE_NO_REPARENT,
1071 },
1072};
1073
1074/* VAPB Clock */
1075
1076static const char * const g12a_vapb_parent_names[] = {
1077 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
1078 "mpll1", "vid_pll", "mpll2", "fclk_div2p5",
1079};
1080
1081static struct clk_regmap g12a_vapb_0_sel = {
1082 .data = &(struct clk_regmap_mux_data){
1083 .offset = HHI_VAPBCLK_CNTL,
1084 .mask = 0x3,
1085 .shift = 9,
1086 },
1087 .hw.init = &(struct clk_init_data){
1088 .name = "vapb_0_sel",
1089 .ops = &clk_regmap_mux_ops,
1090 .parent_names = g12a_vapb_parent_names,
1091 .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
1092 .flags = CLK_SET_RATE_NO_REPARENT,
1093 },
1094};
1095
1096static struct clk_regmap g12a_vapb_0_div = {
1097 .data = &(struct clk_regmap_div_data){
1098 .offset = HHI_VAPBCLK_CNTL,
1099 .shift = 0,
1100 .width = 7,
1101 },
1102 .hw.init = &(struct clk_init_data){
1103 .name = "vapb_0_div",
1104 .ops = &clk_regmap_divider_ops,
1105 .parent_names = (const char *[]){ "vapb_0_sel" },
1106 .num_parents = 1,
1107 .flags = CLK_SET_RATE_PARENT,
1108 },
1109};
1110
1111static struct clk_regmap g12a_vapb_0 = {
1112 .data = &(struct clk_regmap_gate_data){
1113 .offset = HHI_VAPBCLK_CNTL,
1114 .bit_idx = 8,
1115 },
1116 .hw.init = &(struct clk_init_data) {
1117 .name = "vapb_0",
1118 .ops = &clk_regmap_gate_ops,
1119 .parent_names = (const char *[]){ "vapb_0_div" },
1120 .num_parents = 1,
1121 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1122 },
1123};
1124
1125static struct clk_regmap g12a_vapb_1_sel = {
1126 .data = &(struct clk_regmap_mux_data){
1127 .offset = HHI_VAPBCLK_CNTL,
1128 .mask = 0x3,
1129 .shift = 25,
1130 },
1131 .hw.init = &(struct clk_init_data){
1132 .name = "vapb_1_sel",
1133 .ops = &clk_regmap_mux_ops,
1134 .parent_names = g12a_vapb_parent_names,
1135 .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
1136 .flags = CLK_SET_RATE_NO_REPARENT,
1137 },
1138};
1139
1140static struct clk_regmap g12a_vapb_1_div = {
1141 .data = &(struct clk_regmap_div_data){
1142 .offset = HHI_VAPBCLK_CNTL,
1143 .shift = 16,
1144 .width = 7,
1145 },
1146 .hw.init = &(struct clk_init_data){
1147 .name = "vapb_1_div",
1148 .ops = &clk_regmap_divider_ops,
1149 .parent_names = (const char *[]){ "vapb_1_sel" },
1150 .num_parents = 1,
1151 .flags = CLK_SET_RATE_PARENT,
1152 },
1153};
1154
1155static struct clk_regmap g12a_vapb_1 = {
1156 .data = &(struct clk_regmap_gate_data){
1157 .offset = HHI_VAPBCLK_CNTL,
1158 .bit_idx = 24,
1159 },
1160 .hw.init = &(struct clk_init_data) {
1161 .name = "vapb_1",
1162 .ops = &clk_regmap_gate_ops,
1163 .parent_names = (const char *[]){ "vapb_1_div" },
1164 .num_parents = 1,
1165 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1166 },
1167};
1168
1169static struct clk_regmap g12a_vapb_sel = {
1170 .data = &(struct clk_regmap_mux_data){
1171 .offset = HHI_VAPBCLK_CNTL,
1172 .mask = 1,
1173 .shift = 31,
1174 },
1175 .hw.init = &(struct clk_init_data){
1176 .name = "vapb_sel",
1177 .ops = &clk_regmap_mux_ops,
1178 /*
1179 * bit 31 selects from 2 possible parents:
1180 * vapb_0 or vapb_1
1181 */
1182 .parent_names = (const char *[]){ "vapb_0", "vapb_1" },
1183 .num_parents = 2,
1184 .flags = CLK_SET_RATE_NO_REPARENT,
1185 },
1186};
1187
1188static struct clk_regmap g12a_vapb = {
1189 .data = &(struct clk_regmap_gate_data){
1190 .offset = HHI_VAPBCLK_CNTL,
1191 .bit_idx = 30,
1192 },
1193 .hw.init = &(struct clk_init_data) {
1194 .name = "vapb",
1195 .ops = &clk_regmap_gate_ops,
1196 .parent_names = (const char *[]){ "vapb_sel" },
1197 .num_parents = 1,
1198 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1199 },
1200};
1201
/* Video Clocks */

/*
 * Fractional divider fed by hdmi_pll. Uses read-only ops
 * (meson_vid_pll_div_ro_ops): the kernel observes the val/sel fields of
 * HHI_VID_PLL_CLK_DIV but does not program them here.
 */
static struct clk_regmap g12a_vid_pll_div = {
	.data = &(struct meson_vid_pll_div_data){
		.val = {
			.reg_off = HHI_VID_PLL_CLK_DIV,
			.shift = 0,
			.width = 15,
		},
		.sel = {
			.reg_off = HHI_VID_PLL_CLK_DIV,
			.shift = 16,
			.width = 2,
		},
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vid_pll_div",
		.ops = &meson_vid_pll_div_ro_ops,
		.parent_names = (const char *[]){ "hdmi_pll" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
	},
};

static const char * const g12a_vid_pll_parent_names[] = { "vid_pll_div",
							  "hdmi_pll" };

static struct clk_regmap g12a_vid_pll_sel = {
	.data = &(struct clk_regmap_mux_data){
		.offset = HHI_VID_PLL_CLK_DIV,
		.mask = 0x1,
		.shift = 18,
	},
	.hw.init = &(struct clk_init_data){
		.name = "vid_pll_sel",
		.ops = &clk_regmap_mux_ops,
		/*
		 * bit 18 selects from 2 possible parents:
		 * vid_pll_div or hdmi_pll
		 */
		.parent_names = g12a_vid_pll_parent_names,
		.num_parents = ARRAY_SIZE(g12a_vid_pll_parent_names),
		.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
	},
};

/* vid_pll output gate, bit 19 of HHI_VID_PLL_CLK_DIV */
static struct clk_regmap g12a_vid_pll = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_PLL_CLK_DIV,
		.bit_idx = 19,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vid_pll",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vid_pll_sel" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};
1261
/* Shared parent table for the vclk (VID) and vclk2 (VIID) trees */
static const char * const g12a_vclk_parent_names[] = {
	"vid_pll", "gp0_pll", "hifi_pll", "mpll1", "fclk_div3", "fclk_div4",
	"fclk_div5", "fclk_div7"
};

/* vclk source mux: 3-bit field at bits [18:16] of HHI_VID_CLK_CNTL */
static struct clk_regmap g12a_vclk_sel = {
	.data = &(struct clk_regmap_mux_data){
		.offset = HHI_VID_CLK_CNTL,
		.mask = 0x7,
		.shift = 16,
	},
	.hw.init = &(struct clk_init_data){
		.name = "vclk_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_names = g12a_vclk_parent_names,
		.num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
		.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
	},
};

/* vclk2 source mux: same layout in HHI_VIID_CLK_CNTL */
static struct clk_regmap g12a_vclk2_sel = {
	.data = &(struct clk_regmap_mux_data){
		.offset = HHI_VIID_CLK_CNTL,
		.mask = 0x7,
		.shift = 16,
	},
	.hw.init = &(struct clk_init_data){
		.name = "vclk2_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_names = g12a_vclk_parent_names,
		.num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
		.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
	},
};

/* Input gate ahead of the vclk divider, bit 16 of HHI_VID_CLK_DIV */
static struct clk_regmap g12a_vclk_input = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_DIV,
		.bit_idx = 16,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk_input",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk_sel" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

/* Input gate ahead of the vclk2 divider, bit 16 of HHI_VIID_CLK_DIV */
static struct clk_regmap g12a_vclk2_input = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VIID_CLK_DIV,
		.bit_idx = 16,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk2_input",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk2_sel" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

/* vclk divider: 8-bit field at bits [7:0] of HHI_VID_CLK_DIV */
static struct clk_regmap g12a_vclk_div = {
	.data = &(struct clk_regmap_div_data){
		.offset = HHI_VID_CLK_DIV,
		.shift = 0,
		.width = 8,
	},
	.hw.init = &(struct clk_init_data){
		.name = "vclk_div",
		.ops = &clk_regmap_divider_ops,
		.parent_names = (const char *[]){ "vclk_input" },
		.num_parents = 1,
		.flags = CLK_GET_RATE_NOCACHE,
	},
};

/* vclk2 divider: 8-bit field at bits [7:0] of HHI_VIID_CLK_DIV */
static struct clk_regmap g12a_vclk2_div = {
	.data = &(struct clk_regmap_div_data){
		.offset = HHI_VIID_CLK_DIV,
		.shift = 0,
		.width = 8,
	},
	.hw.init = &(struct clk_init_data){
		.name = "vclk2_div",
		.ops = &clk_regmap_divider_ops,
		.parent_names = (const char *[]){ "vclk2_input" },
		.num_parents = 1,
		.flags = CLK_GET_RATE_NOCACHE,
	},
};

/* vclk master gate, bit 19 of HHI_VID_CLK_CNTL */
static struct clk_regmap g12a_vclk = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL,
		.bit_idx = 19,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk_div" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

/* vclk2 master gate, bit 19 of HHI_VIID_CLK_CNTL */
static struct clk_regmap g12a_vclk2 = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VIID_CLK_CNTL,
		.bit_idx = 19,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk2",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk2_div" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};
1382
/*
 * Per-output enable gates for the vclk / vclk2 post-dividers
 * (bits [4:0] of HHI_VID_CLK_CNTL resp. HHI_VIID_CLK_CNTL).
 * The *_divN_en gates feed the fixed-factor vclk_divN / vclk2_divN
 * clocks defined further down in this file; *_div1 is the undivided
 * output.
 */
static struct clk_regmap g12a_vclk_div1 = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk_div1",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

static struct clk_regmap g12a_vclk_div2_en = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL,
		.bit_idx = 1,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk_div2_en",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

static struct clk_regmap g12a_vclk_div4_en = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL,
		.bit_idx = 2,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk_div4_en",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

static struct clk_regmap g12a_vclk_div6_en = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL,
		.bit_idx = 3,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk_div6_en",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

static struct clk_regmap g12a_vclk_div12_en = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL,
		.bit_idx = 4,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk_div12_en",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

/* Same gate layout for the vclk2 tree, in HHI_VIID_CLK_CNTL */
static struct clk_regmap g12a_vclk2_div1 = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VIID_CLK_CNTL,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk2_div1",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk2" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

static struct clk_regmap g12a_vclk2_div2_en = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VIID_CLK_CNTL,
		.bit_idx = 1,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk2_div2_en",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk2" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

static struct clk_regmap g12a_vclk2_div4_en = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VIID_CLK_CNTL,
		.bit_idx = 2,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk2_div4_en",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk2" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

static struct clk_regmap g12a_vclk2_div6_en = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VIID_CLK_CNTL,
		.bit_idx = 3,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk2_div6_en",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk2" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

static struct clk_regmap g12a_vclk2_div12_en = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VIID_CLK_CNTL,
		.bit_idx = 4,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "vclk2_div12_en",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "vclk2" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};
1522
/*
 * Fixed post-dividers of vclk / vclk2. Each one is parented on its
 * corresponding *_divN_en gate defined above.
 */
static struct clk_fixed_factor g12a_vclk_div2 = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "vclk_div2",
		.ops = &clk_fixed_factor_ops,
		.parent_names = (const char *[]){ "vclk_div2_en" },
		.num_parents = 1,
	},
};

static struct clk_fixed_factor g12a_vclk_div4 = {
	.mult = 1,
	.div = 4,
	.hw.init = &(struct clk_init_data){
		.name = "vclk_div4",
		.ops = &clk_fixed_factor_ops,
		.parent_names = (const char *[]){ "vclk_div4_en" },
		.num_parents = 1,
	},
};

static struct clk_fixed_factor g12a_vclk_div6 = {
	.mult = 1,
	.div = 6,
	.hw.init = &(struct clk_init_data){
		.name = "vclk_div6",
		.ops = &clk_fixed_factor_ops,
		.parent_names = (const char *[]){ "vclk_div6_en" },
		.num_parents = 1,
	},
};

static struct clk_fixed_factor g12a_vclk_div12 = {
	.mult = 1,
	.div = 12,
	.hw.init = &(struct clk_init_data){
		.name = "vclk_div12",
		.ops = &clk_fixed_factor_ops,
		.parent_names = (const char *[]){ "vclk_div12_en" },
		.num_parents = 1,
	},
};

static struct clk_fixed_factor g12a_vclk2_div2 = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "vclk2_div2",
		.ops = &clk_fixed_factor_ops,
		.parent_names = (const char *[]){ "vclk2_div2_en" },
		.num_parents = 1,
	},
};

static struct clk_fixed_factor g12a_vclk2_div4 = {
	.mult = 1,
	.div = 4,
	.hw.init = &(struct clk_init_data){
		.name = "vclk2_div4",
		.ops = &clk_fixed_factor_ops,
		.parent_names = (const char *[]){ "vclk2_div4_en" },
		.num_parents = 1,
	},
};

static struct clk_fixed_factor g12a_vclk2_div6 = {
	.mult = 1,
	.div = 6,
	.hw.init = &(struct clk_init_data){
		.name = "vclk2_div6",
		.ops = &clk_fixed_factor_ops,
		.parent_names = (const char *[]){ "vclk2_div6_en" },
		.num_parents = 1,
	},
};

static struct clk_fixed_factor g12a_vclk2_div12 = {
	.mult = 1,
	.div = 12,
	.hw.init = &(struct clk_init_data){
		.name = "vclk2_div12",
		.ops = &clk_fixed_factor_ops,
		.parent_names = (const char *[]){ "vclk2_div12_en" },
		.num_parents = 1,
	},
};
1610
/*
 * Translation table for the 4-bit CTS muxes: parent index i selects
 * register value mux_table_cts_sel[i]. Values 0-4 pick the vclk
 * outputs, values 8-12 pick the vclk2 outputs; 5-7 are unused.
 */
static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
static const char * const g12a_cts_parent_names[] = {
	"vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
	"vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
	"vclk2_div6", "vclk2_div12"
};

/* CTS ENCI source mux: bits [31:28] of HHI_VID_CLK_DIV */
static struct clk_regmap g12a_cts_enci_sel = {
	.data = &(struct clk_regmap_mux_data){
		.offset = HHI_VID_CLK_DIV,
		.mask = 0xf,
		.shift = 28,
		.table = mux_table_cts_sel,
	},
	.hw.init = &(struct clk_init_data){
		.name = "cts_enci_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_names = g12a_cts_parent_names,
		.num_parents = ARRAY_SIZE(g12a_cts_parent_names),
		.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
	},
};

/* CTS ENCP source mux: bits [23:20] of HHI_VID_CLK_DIV */
static struct clk_regmap g12a_cts_encp_sel = {
	.data = &(struct clk_regmap_mux_data){
		.offset = HHI_VID_CLK_DIV,
		.mask = 0xf,
		.shift = 20,
		.table = mux_table_cts_sel,
	},
	.hw.init = &(struct clk_init_data){
		.name = "cts_encp_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_names = g12a_cts_parent_names,
		.num_parents = ARRAY_SIZE(g12a_cts_parent_names),
		.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
	},
};

/* CTS VDAC source mux: bits [31:28] of HHI_VIID_CLK_DIV */
static struct clk_regmap g12a_cts_vdac_sel = {
	.data = &(struct clk_regmap_mux_data){
		.offset = HHI_VIID_CLK_DIV,
		.mask = 0xf,
		.shift = 28,
		.table = mux_table_cts_sel,
	},
	.hw.init = &(struct clk_init_data){
		.name = "cts_vdac_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_names = g12a_cts_parent_names,
		.num_parents = ARRAY_SIZE(g12a_cts_parent_names),
		.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
	},
};
1665
/* TOFIX: add support for cts_tcon */
/* Same index -> register value mapping as mux_table_cts_sel */
static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
static const char * const g12a_cts_hdmi_tx_parent_names[] = {
	"vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
	"vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
	"vclk2_div6", "vclk2_div12"
};

/* HDMI TX pixel clock source mux: bits [19:16] of HHI_HDMI_CLK_CNTL */
static struct clk_regmap g12a_hdmi_tx_sel = {
	.data = &(struct clk_regmap_mux_data){
		.offset = HHI_HDMI_CLK_CNTL,
		.mask = 0xf,
		.shift = 16,
		.table = mux_table_hdmi_tx_sel,
	},
	.hw.init = &(struct clk_init_data){
		.name = "hdmi_tx_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_names = g12a_cts_hdmi_tx_parent_names,
		.num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_names),
		.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
	},
};
1689
/* Output gates for the CTS/HDMI TX clocks, all in HHI_VID_CLK_CNTL2 */

/* CTS ENCI gate, bit 0 */
static struct clk_regmap g12a_cts_enci = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL2,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "cts_enci",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "cts_enci_sel" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

/* CTS ENCP gate, bit 2 */
static struct clk_regmap g12a_cts_encp = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL2,
		.bit_idx = 2,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "cts_encp",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "cts_encp_sel" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

/* CTS VDAC gate, bit 4 */
static struct clk_regmap g12a_cts_vdac = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL2,
		.bit_idx = 4,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "cts_vdac",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "cts_vdac_sel" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};

/* HDMI TX pixel clock gate, bit 5 */
static struct clk_regmap g12a_hdmi_tx = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_VID_CLK_CNTL2,
		.bit_idx = 5,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "hdmi_tx",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "hdmi_tx_sel" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};
1745
/* HDMI Clocks */

static const char * const g12a_hdmi_parent_names[] = {
	IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
};

/*
 * HDMI system clock mux: 2-bit field at bits [10:9] of
 * HHI_HDMI_CLK_CNTL. CLK_MUX_ROUND_CLOSEST lets the mux pick the
 * parent giving the rate closest to the request.
 */
static struct clk_regmap g12a_hdmi_sel = {
	.data = &(struct clk_regmap_mux_data){
		.offset = HHI_HDMI_CLK_CNTL,
		.mask = 0x3,
		.shift = 9,
		.flags = CLK_MUX_ROUND_CLOSEST,
	},
	.hw.init = &(struct clk_init_data){
		.name = "hdmi_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_names = g12a_hdmi_parent_names,
		.num_parents = ARRAY_SIZE(g12a_hdmi_parent_names),
		.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
	},
};

/* HDMI system clock divider: 7-bit field at bits [6:0] */
static struct clk_regmap g12a_hdmi_div = {
	.data = &(struct clk_regmap_div_data){
		.offset = HHI_HDMI_CLK_CNTL,
		.shift = 0,
		.width = 7,
	},
	.hw.init = &(struct clk_init_data){
		.name = "hdmi_div",
		.ops = &clk_regmap_divider_ops,
		.parent_names = (const char *[]){ "hdmi_sel" },
		.num_parents = 1,
		.flags = CLK_GET_RATE_NOCACHE,
	},
};

/* HDMI system clock gate, bit 8 */
static struct clk_regmap g12a_hdmi = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_HDMI_CLK_CNTL,
		.bit_idx = 8,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "hdmi",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "hdmi_div" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
	},
};
1796
1797/*
1798 * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
1799 * muxed by a glitch-free switch.
1800 */
1801
1802static const char * const g12a_mali_0_1_parent_names[] = {
1803 IN_PREFIX "xtal", "gp0_pll", "hihi_pll", "fclk_div2p5",
1804 "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7"
1805};
1806
1807static struct clk_regmap g12a_mali_0_sel = {
1808 .data = &(struct clk_regmap_mux_data){
1809 .offset = HHI_MALI_CLK_CNTL,
1810 .mask = 0x7,
1811 .shift = 9,
1812 },
1813 .hw.init = &(struct clk_init_data){
1814 .name = "mali_0_sel",
1815 .ops = &clk_regmap_mux_ops,
1816 .parent_names = g12a_mali_0_1_parent_names,
1817 .num_parents = 8,
1818 .flags = CLK_SET_RATE_NO_REPARENT,
1819 },
1820};
1821
1822static struct clk_regmap g12a_mali_0_div = {
1823 .data = &(struct clk_regmap_div_data){
1824 .offset = HHI_MALI_CLK_CNTL,
1825 .shift = 0,
1826 .width = 7,
1827 },
1828 .hw.init = &(struct clk_init_data){
1829 .name = "mali_0_div",
1830 .ops = &clk_regmap_divider_ops,
1831 .parent_names = (const char *[]){ "mali_0_sel" },
1832 .num_parents = 1,
1833 .flags = CLK_SET_RATE_NO_REPARENT,
1834 },
1835};
1836
1837static struct clk_regmap g12a_mali_0 = {
1838 .data = &(struct clk_regmap_gate_data){
1839 .offset = HHI_MALI_CLK_CNTL,
1840 .bit_idx = 8,
1841 },
1842 .hw.init = &(struct clk_init_data){
1843 .name = "mali_0",
1844 .ops = &clk_regmap_gate_ops,
1845 .parent_names = (const char *[]){ "mali_0_div" },
1846 .num_parents = 1,
1847 .flags = CLK_SET_RATE_PARENT,
1848 },
1849};
1850
1851static struct clk_regmap g12a_mali_1_sel = {
1852 .data = &(struct clk_regmap_mux_data){
1853 .offset = HHI_MALI_CLK_CNTL,
1854 .mask = 0x7,
1855 .shift = 25,
1856 },
1857 .hw.init = &(struct clk_init_data){
1858 .name = "mali_1_sel",
1859 .ops = &clk_regmap_mux_ops,
1860 .parent_names = g12a_mali_0_1_parent_names,
1861 .num_parents = 8,
1862 .flags = CLK_SET_RATE_NO_REPARENT,
1863 },
1864};
1865
1866static struct clk_regmap g12a_mali_1_div = {
1867 .data = &(struct clk_regmap_div_data){
1868 .offset = HHI_MALI_CLK_CNTL,
1869 .shift = 16,
1870 .width = 7,
1871 },
1872 .hw.init = &(struct clk_init_data){
1873 .name = "mali_1_div",
1874 .ops = &clk_regmap_divider_ops,
1875 .parent_names = (const char *[]){ "mali_1_sel" },
1876 .num_parents = 1,
1877 .flags = CLK_SET_RATE_NO_REPARENT,
1878 },
1879};
1880
1881static struct clk_regmap g12a_mali_1 = {
1882 .data = &(struct clk_regmap_gate_data){
1883 .offset = HHI_MALI_CLK_CNTL,
1884 .bit_idx = 24,
1885 },
1886 .hw.init = &(struct clk_init_data){
1887 .name = "mali_1",
1888 .ops = &clk_regmap_gate_ops,
1889 .parent_names = (const char *[]){ "mali_1_div" },
1890 .num_parents = 1,
1891 .flags = CLK_SET_RATE_PARENT,
1892 },
1893};
1894
1895static const char * const g12a_mali_parent_names[] = {
1896 "mali_0", "mali_1"
1897};
1898
1899static struct clk_regmap g12a_mali = {
1900 .data = &(struct clk_regmap_mux_data){
1901 .offset = HHI_MALI_CLK_CNTL,
1902 .mask = 1,
1903 .shift = 31,
1904 },
1905 .hw.init = &(struct clk_init_data){
1906 .name = "mali",
1907 .ops = &clk_regmap_mux_ops,
1908 .parent_names = g12a_mali_parent_names,
1909 .num_parents = 2,
1910 .flags = CLK_SET_RATE_NO_REPARENT,
1911 },
1912};
1913
1914/* Everything Else (EE) domain gates */
1915static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);
1916static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1);
1917static MESON_GATE(g12a_audio_locker, HHI_GCLK_MPEG0, 2);
1918static MESON_GATE(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
1919static MESON_GATE(g12a_eth_phy, HHI_GCLK_MPEG0, 4);
1920static MESON_GATE(g12a_isa, HHI_GCLK_MPEG0, 5);
1921static MESON_GATE(g12a_pl301, HHI_GCLK_MPEG0, 6);
1922static MESON_GATE(g12a_periphs, HHI_GCLK_MPEG0, 7);
1923static MESON_GATE(g12a_spicc_0, HHI_GCLK_MPEG0, 8);
1924static MESON_GATE(g12a_i2c, HHI_GCLK_MPEG0, 9);
1925static MESON_GATE(g12a_sana, HHI_GCLK_MPEG0, 10);
1926static MESON_GATE(g12a_sd, HHI_GCLK_MPEG0, 11);
1927static MESON_GATE(g12a_rng0, HHI_GCLK_MPEG0, 12);
1928static MESON_GATE(g12a_uart0, HHI_GCLK_MPEG0, 13);
1929static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
1930static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
1931static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
1932static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
1933static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4);
1934static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
1935static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
1936static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
1937
/* EE domain gates in HHI_GCLK_MPEG1 */
static MESON_GATE(g12a_audio, HHI_GCLK_MPEG1, 0);
static MESON_GATE(g12a_eth_core, HHI_GCLK_MPEG1, 3);
static MESON_GATE(g12a_demux, HHI_GCLK_MPEG1, 4);
static MESON_GATE(g12a_audio_ififo, HHI_GCLK_MPEG1, 11);
static MESON_GATE(g12a_adc, HHI_GCLK_MPEG1, 13);
static MESON_GATE(g12a_uart1, HHI_GCLK_MPEG1, 16);
static MESON_GATE(g12a_g2d, HHI_GCLK_MPEG1, 20);
static MESON_GATE(g12a_reset, HHI_GCLK_MPEG1, 23);
static MESON_GATE(g12a_pcie_comb, HHI_GCLK_MPEG1, 24);
static MESON_GATE(g12a_parser, HHI_GCLK_MPEG1, 25);
static MESON_GATE(g12a_usb_general, HHI_GCLK_MPEG1, 26);
static MESON_GATE(g12a_pcie_phy, HHI_GCLK_MPEG1, 27);
static MESON_GATE(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29);
1951
/* EE domain gates in HHI_GCLK_MPEG2 */
static MESON_GATE(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1);
static MESON_GATE(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3);
static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4);
static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6);
static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15);
static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25);
static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30);
1962
/* EE domain gates in HHI_GCLK_OTHER (mostly video encoder clocks) */
static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1);
static MESON_GATE(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2);
static MESON_GATE(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3);
static MESON_GATE(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4);
static MESON_GATE(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5);
static MESON_GATE(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6);
static MESON_GATE(g12a_vclk2_other, HHI_GCLK_OTHER, 7);
static MESON_GATE(g12a_vclk2_enci, HHI_GCLK_OTHER, 8);
static MESON_GATE(g12a_vclk2_encp, HHI_GCLK_OTHER, 9);
static MESON_GATE(g12a_dac_clk, HHI_GCLK_OTHER, 10);
static MESON_GATE(g12a_aoclk_gate, HHI_GCLK_OTHER, 14);
static MESON_GATE(g12a_iec958_gate, HHI_GCLK_OTHER, 16);
static MESON_GATE(g12a_enc480p, HHI_GCLK_OTHER, 20);
static MESON_GATE(g12a_rng1, HHI_GCLK_OTHER, 21);
static MESON_GATE(g12a_vclk2_enct, HHI_GCLK_OTHER, 22);
static MESON_GATE(g12a_vclk2_encl, HHI_GCLK_OTHER, 23);
static MESON_GATE(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24);
static MESON_GATE(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25);
static MESON_GATE(g12a_vclk2_other1, HHI_GCLK_OTHER, 26);
1982
/* Read-only gates in HHI_GCLK_OTHER2: reported but never modified */
static MESON_GATE_RO(g12a_dma, HHI_GCLK_OTHER2, 0);
static MESON_GATE_RO(g12a_efuse, HHI_GCLK_OTHER2, 1);
static MESON_GATE_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2);
static MESON_GATE_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3);
static MESON_GATE_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4);
1988
1989/* Array of all clocks provided by this provider */
1990static struct clk_hw_onecell_data g12a_hw_onecell_data = {
1991 .hws = {
1992 [CLKID_SYS_PLL] = &g12a_sys_pll.hw,
1993 [CLKID_FIXED_PLL] = &g12a_fixed_pll.hw,
1994 [CLKID_FCLK_DIV2] = &g12a_fclk_div2.hw,
1995 [CLKID_FCLK_DIV3] = &g12a_fclk_div3.hw,
1996 [CLKID_FCLK_DIV4] = &g12a_fclk_div4.hw,
1997 [CLKID_FCLK_DIV5] = &g12a_fclk_div5.hw,
1998 [CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
1999 [CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
2000 [CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
2001 [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
2002 [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
2003 [CLKID_CLK81] = &g12a_clk81.hw,
2004 [CLKID_MPLL0] = &g12a_mpll0.hw,
2005 [CLKID_MPLL1] = &g12a_mpll1.hw,
2006 [CLKID_MPLL2] = &g12a_mpll2.hw,
2007 [CLKID_MPLL3] = &g12a_mpll3.hw,
2008 [CLKID_DDR] = &g12a_ddr.hw,
2009 [CLKID_DOS] = &g12a_dos.hw,
2010 [CLKID_AUDIO_LOCKER] = &g12a_audio_locker.hw,
2011 [CLKID_MIPI_DSI_HOST] = &g12a_mipi_dsi_host.hw,
2012 [CLKID_ETH_PHY] = &g12a_eth_phy.hw,
2013 [CLKID_ISA] = &g12a_isa.hw,
2014 [CLKID_PL301] = &g12a_pl301.hw,
2015 [CLKID_PERIPHS] = &g12a_periphs.hw,
2016 [CLKID_SPICC0] = &g12a_spicc_0.hw,
2017 [CLKID_I2C] = &g12a_i2c.hw,
2018 [CLKID_SANA] = &g12a_sana.hw,
2019 [CLKID_SD] = &g12a_sd.hw,
2020 [CLKID_RNG0] = &g12a_rng0.hw,
2021 [CLKID_UART0] = &g12a_uart0.hw,
2022 [CLKID_SPICC1] = &g12a_spicc_1.hw,
2023 [CLKID_HIU_IFACE] = &g12a_hiu_reg.hw,
2024 [CLKID_MIPI_DSI_PHY] = &g12a_mipi_dsi_phy.hw,
2025 [CLKID_ASSIST_MISC] = &g12a_assist_misc.hw,
2026 [CLKID_SD_EMMC_A] = &g12a_emmc_a.hw,
2027 [CLKID_SD_EMMC_B] = &g12a_emmc_b.hw,
2028 [CLKID_SD_EMMC_C] = &g12a_emmc_c.hw,
2029 [CLKID_AUDIO_CODEC] = &g12a_audio_codec.hw,
2030 [CLKID_AUDIO] = &g12a_audio.hw,
2031 [CLKID_ETH] = &g12a_eth_core.hw,
2032 [CLKID_DEMUX] = &g12a_demux.hw,
2033 [CLKID_AUDIO_IFIFO] = &g12a_audio_ififo.hw,
2034 [CLKID_ADC] = &g12a_adc.hw,
2035 [CLKID_UART1] = &g12a_uart1.hw,
2036 [CLKID_G2D] = &g12a_g2d.hw,
2037 [CLKID_RESET] = &g12a_reset.hw,
2038 [CLKID_PCIE_COMB] = &g12a_pcie_comb.hw,
2039 [CLKID_PARSER] = &g12a_parser.hw,
2040 [CLKID_USB] = &g12a_usb_general.hw,
2041 [CLKID_PCIE_PHY] = &g12a_pcie_phy.hw,
2042 [CLKID_AHB_ARB0] = &g12a_ahb_arb0.hw,
2043 [CLKID_AHB_DATA_BUS] = &g12a_ahb_data_bus.hw,
2044 [CLKID_AHB_CTRL_BUS] = &g12a_ahb_ctrl_bus.hw,
2045 [CLKID_HTX_HDCP22] = &g12a_htx_hdcp22.hw,
2046 [CLKID_HTX_PCLK] = &g12a_htx_pclk.hw,
2047 [CLKID_BT656] = &g12a_bt656.hw,
2048 [CLKID_USB1_DDR_BRIDGE] = &g12a_usb1_to_ddr.hw,
2049 [CLKID_MMC_PCLK] = &g12a_mmc_pclk.hw,
2050 [CLKID_UART2] = &g12a_uart2.hw,
2051 [CLKID_VPU_INTR] = &g12a_vpu_intr.hw,
2052 [CLKID_GIC] = &g12a_gic.hw,
2053 [CLKID_SD_EMMC_A_CLK0_SEL] = &g12a_sd_emmc_a_clk0_sel.hw,
2054 [CLKID_SD_EMMC_A_CLK0_DIV] = &g12a_sd_emmc_a_clk0_div.hw,
2055 [CLKID_SD_EMMC_A_CLK0] = &g12a_sd_emmc_a_clk0.hw,
2056 [CLKID_SD_EMMC_B_CLK0_SEL] = &g12a_sd_emmc_b_clk0_sel.hw,
2057 [CLKID_SD_EMMC_B_CLK0_DIV] = &g12a_sd_emmc_b_clk0_div.hw,
2058 [CLKID_SD_EMMC_B_CLK0] = &g12a_sd_emmc_b_clk0.hw,
2059 [CLKID_SD_EMMC_C_CLK0_SEL] = &g12a_sd_emmc_c_clk0_sel.hw,
2060 [CLKID_SD_EMMC_C_CLK0_DIV] = &g12a_sd_emmc_c_clk0_div.hw,
2061 [CLKID_SD_EMMC_C_CLK0] = &g12a_sd_emmc_c_clk0.hw,
2062 [CLKID_MPLL0_DIV] = &g12a_mpll0_div.hw,
2063 [CLKID_MPLL1_DIV] = &g12a_mpll1_div.hw,
2064 [CLKID_MPLL2_DIV] = &g12a_mpll2_div.hw,
2065 [CLKID_MPLL3_DIV] = &g12a_mpll3_div.hw,
2066 [CLKID_FCLK_DIV2_DIV] = &g12a_fclk_div2_div.hw,
2067 [CLKID_FCLK_DIV3_DIV] = &g12a_fclk_div3_div.hw,
2068 [CLKID_FCLK_DIV4_DIV] = &g12a_fclk_div4_div.hw,
2069 [CLKID_FCLK_DIV5_DIV] = &g12a_fclk_div5_div.hw,
2070 [CLKID_FCLK_DIV7_DIV] = &g12a_fclk_div7_div.hw,
2071 [CLKID_FCLK_DIV2P5_DIV] = &g12a_fclk_div2p5_div.hw,
2072 [CLKID_HIFI_PLL] = &g12a_hifi_pll.hw,
2073 [CLKID_VCLK2_VENCI0] = &g12a_vclk2_venci0.hw,
2074 [CLKID_VCLK2_VENCI1] = &g12a_vclk2_venci1.hw,
2075 [CLKID_VCLK2_VENCP0] = &g12a_vclk2_vencp0.hw,
2076 [CLKID_VCLK2_VENCP1] = &g12a_vclk2_vencp1.hw,
2077 [CLKID_VCLK2_VENCT0] = &g12a_vclk2_venct0.hw,
2078 [CLKID_VCLK2_VENCT1] = &g12a_vclk2_venct1.hw,
2079 [CLKID_VCLK2_OTHER] = &g12a_vclk2_other.hw,
2080 [CLKID_VCLK2_ENCI] = &g12a_vclk2_enci.hw,
2081 [CLKID_VCLK2_ENCP] = &g12a_vclk2_encp.hw,
2082 [CLKID_DAC_CLK] = &g12a_dac_clk.hw,
2083 [CLKID_AOCLK] = &g12a_aoclk_gate.hw,
2084 [CLKID_IEC958] = &g12a_iec958_gate.hw,
2085 [CLKID_ENC480P] = &g12a_enc480p.hw,
2086 [CLKID_RNG1] = &g12a_rng1.hw,
2087 [CLKID_VCLK2_ENCT] = &g12a_vclk2_enct.hw,
2088 [CLKID_VCLK2_ENCL] = &g12a_vclk2_encl.hw,
2089 [CLKID_VCLK2_VENCLMMC] = &g12a_vclk2_venclmmc.hw,
2090 [CLKID_VCLK2_VENCL] = &g12a_vclk2_vencl.hw,
2091 [CLKID_VCLK2_OTHER1] = &g12a_vclk2_other1.hw,
2092 [CLKID_FIXED_PLL_DCO] = &g12a_fixed_pll_dco.hw,
2093 [CLKID_SYS_PLL_DCO] = &g12a_sys_pll_dco.hw,
2094 [CLKID_GP0_PLL_DCO] = &g12a_gp0_pll_dco.hw,
2095 [CLKID_HIFI_PLL_DCO] = &g12a_hifi_pll_dco.hw,
2096 [CLKID_DMA] = &g12a_dma.hw,
2097 [CLKID_EFUSE] = &g12a_efuse.hw,
2098 [CLKID_ROM_BOOT] = &g12a_rom_boot.hw,
2099 [CLKID_RESET_SEC] = &g12a_reset_sec.hw,
2100 [CLKID_SEC_AHB_APB3] = &g12a_sec_ahb_apb3.hw,
2101 [CLKID_MPLL_PREDIV] = &g12a_mpll_prediv.hw,
2102 [CLKID_VPU_0_SEL] = &g12a_vpu_0_sel.hw,
2103 [CLKID_VPU_0_DIV] = &g12a_vpu_0_div.hw,
2104 [CLKID_VPU_0] = &g12a_vpu_0.hw,
2105 [CLKID_VPU_1_SEL] = &g12a_vpu_1_sel.hw,
2106 [CLKID_VPU_1_DIV] = &g12a_vpu_1_div.hw,
2107 [CLKID_VPU_1] = &g12a_vpu_1.hw,
2108 [CLKID_VPU] = &g12a_vpu.hw,
2109 [CLKID_VAPB_0_SEL] = &g12a_vapb_0_sel.hw,
2110 [CLKID_VAPB_0_DIV] = &g12a_vapb_0_div.hw,
2111 [CLKID_VAPB_0] = &g12a_vapb_0.hw,
2112 [CLKID_VAPB_1_SEL] = &g12a_vapb_1_sel.hw,
2113 [CLKID_VAPB_1_DIV] = &g12a_vapb_1_div.hw,
2114 [CLKID_VAPB_1] = &g12a_vapb_1.hw,
2115 [CLKID_VAPB_SEL] = &g12a_vapb_sel.hw,
2116 [CLKID_VAPB] = &g12a_vapb.hw,
2117 [CLKID_HDMI_PLL_DCO] = &g12a_hdmi_pll_dco.hw,
2118 [CLKID_HDMI_PLL_OD] = &g12a_hdmi_pll_od.hw,
2119 [CLKID_HDMI_PLL_OD2] = &g12a_hdmi_pll_od2.hw,
2120 [CLKID_HDMI_PLL] = &g12a_hdmi_pll.hw,
2121 [CLKID_VID_PLL] = &g12a_vid_pll_div.hw,
2122 [CLKID_VID_PLL_SEL] = &g12a_vid_pll_sel.hw,
2123 [CLKID_VID_PLL_DIV] = &g12a_vid_pll.hw,
2124 [CLKID_VCLK_SEL] = &g12a_vclk_sel.hw,
2125 [CLKID_VCLK2_SEL] = &g12a_vclk2_sel.hw,
2126 [CLKID_VCLK_INPUT] = &g12a_vclk_input.hw,
2127 [CLKID_VCLK2_INPUT] = &g12a_vclk2_input.hw,
2128 [CLKID_VCLK_DIV] = &g12a_vclk_div.hw,
2129 [CLKID_VCLK2_DIV] = &g12a_vclk2_div.hw,
2130 [CLKID_VCLK] = &g12a_vclk.hw,
2131 [CLKID_VCLK2] = &g12a_vclk2.hw,
2132 [CLKID_VCLK_DIV1] = &g12a_vclk_div1.hw,
2133 [CLKID_VCLK_DIV2_EN] = &g12a_vclk_div2_en.hw,
2134 [CLKID_VCLK_DIV4_EN] = &g12a_vclk_div4_en.hw,
2135 [CLKID_VCLK_DIV6_EN] = &g12a_vclk_div6_en.hw,
2136 [CLKID_VCLK_DIV12_EN] = &g12a_vclk_div12_en.hw,
2137 [CLKID_VCLK2_DIV1] = &g12a_vclk2_div1.hw,
2138 [CLKID_VCLK2_DIV2_EN] = &g12a_vclk2_div2_en.hw,
2139 [CLKID_VCLK2_DIV4_EN] = &g12a_vclk2_div4_en.hw,
2140 [CLKID_VCLK2_DIV6_EN] = &g12a_vclk2_div6_en.hw,
2141 [CLKID_VCLK2_DIV12_EN] = &g12a_vclk2_div12_en.hw,
2142 [CLKID_VCLK_DIV2] = &g12a_vclk_div2.hw,
2143 [CLKID_VCLK_DIV4] = &g12a_vclk_div4.hw,
2144 [CLKID_VCLK_DIV6] = &g12a_vclk_div6.hw,
2145 [CLKID_VCLK_DIV12] = &g12a_vclk_div12.hw,
2146 [CLKID_VCLK2_DIV2] = &g12a_vclk2_div2.hw,
2147 [CLKID_VCLK2_DIV4] = &g12a_vclk2_div4.hw,
2148 [CLKID_VCLK2_DIV6] = &g12a_vclk2_div6.hw,
2149 [CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw,
2150 [CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw,
2151 [CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw,
2152 [CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw,
2153 [CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw,
2154 [CLKID_CTS_ENCI] = &g12a_cts_enci.hw,
2155 [CLKID_CTS_ENCP] = &g12a_cts_encp.hw,
2156 [CLKID_CTS_VDAC] = &g12a_cts_vdac.hw,
2157 [CLKID_HDMI_TX] = &g12a_hdmi_tx.hw,
2158 [CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw,
2159 [CLKID_HDMI_DIV] = &g12a_hdmi_div.hw,
2160 [CLKID_HDMI] = &g12a_hdmi.hw,
2161 [CLKID_MALI_0_SEL] = &g12a_mali_0_sel.hw,
2162 [CLKID_MALI_0_DIV] = &g12a_mali_0_div.hw,
2163 [CLKID_MALI_0] = &g12a_mali_0.hw,
2164 [CLKID_MALI_1_SEL] = &g12a_mali_1_sel.hw,
2165 [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw,
2166 [CLKID_MALI_1] = &g12a_mali_1.hw,
2167 [CLKID_MALI] = &g12a_mali.hw,
2168 [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
2169 [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
2170 [NR_CLKS] = NULL,
2171 },
2172 .num = NR_CLKS,
2173};
2174
2175/* Convenience table to populate regmap in .probe */
2176static struct clk_regmap *const g12a_clk_regmaps[] = {
2177 &g12a_clk81,
2178 &g12a_dos,
2179 &g12a_ddr,
2180 &g12a_audio_locker,
2181 &g12a_mipi_dsi_host,
2182 &g12a_eth_phy,
2183 &g12a_isa,
2184 &g12a_pl301,
2185 &g12a_periphs,
2186 &g12a_spicc_0,
2187 &g12a_i2c,
2188 &g12a_sana,
2189 &g12a_sd,
2190 &g12a_rng0,
2191 &g12a_uart0,
2192 &g12a_spicc_1,
2193 &g12a_hiu_reg,
2194 &g12a_mipi_dsi_phy,
2195 &g12a_assist_misc,
2196 &g12a_emmc_a,
2197 &g12a_emmc_b,
2198 &g12a_emmc_c,
2199 &g12a_audio_codec,
2200 &g12a_audio,
2201 &g12a_eth_core,
2202 &g12a_demux,
2203 &g12a_audio_ififo,
2204 &g12a_adc,
2205 &g12a_uart1,
2206 &g12a_g2d,
2207 &g12a_reset,
2208 &g12a_pcie_comb,
2209 &g12a_parser,
2210 &g12a_usb_general,
2211 &g12a_pcie_phy,
2212 &g12a_ahb_arb0,
2213 &g12a_ahb_data_bus,
2214 &g12a_ahb_ctrl_bus,
2215 &g12a_htx_hdcp22,
2216 &g12a_htx_pclk,
2217 &g12a_bt656,
2218 &g12a_usb1_to_ddr,
2219 &g12a_mmc_pclk,
2220 &g12a_vpu_intr,
2221 &g12a_gic,
2222 &g12a_sd_emmc_a_clk0,
2223 &g12a_sd_emmc_b_clk0,
2224 &g12a_sd_emmc_c_clk0,
2225 &g12a_mpeg_clk_div,
2226 &g12a_sd_emmc_a_clk0_div,
2227 &g12a_sd_emmc_b_clk0_div,
2228 &g12a_sd_emmc_c_clk0_div,
2229 &g12a_mpeg_clk_sel,
2230 &g12a_sd_emmc_a_clk0_sel,
2231 &g12a_sd_emmc_b_clk0_sel,
2232 &g12a_sd_emmc_c_clk0_sel,
2233 &g12a_mpll0,
2234 &g12a_mpll1,
2235 &g12a_mpll2,
2236 &g12a_mpll3,
2237 &g12a_mpll0_div,
2238 &g12a_mpll1_div,
2239 &g12a_mpll2_div,
2240 &g12a_mpll3_div,
2241 &g12a_fixed_pll,
2242 &g12a_sys_pll,
2243 &g12a_gp0_pll,
2244 &g12a_hifi_pll,
2245 &g12a_vclk2_venci0,
2246 &g12a_vclk2_venci1,
2247 &g12a_vclk2_vencp0,
2248 &g12a_vclk2_vencp1,
2249 &g12a_vclk2_venct0,
2250 &g12a_vclk2_venct1,
2251 &g12a_vclk2_other,
2252 &g12a_vclk2_enci,
2253 &g12a_vclk2_encp,
2254 &g12a_dac_clk,
2255 &g12a_aoclk_gate,
2256 &g12a_iec958_gate,
2257 &g12a_enc480p,
2258 &g12a_rng1,
2259 &g12a_vclk2_enct,
2260 &g12a_vclk2_encl,
2261 &g12a_vclk2_venclmmc,
2262 &g12a_vclk2_vencl,
2263 &g12a_vclk2_other1,
2264 &g12a_fixed_pll_dco,
2265 &g12a_sys_pll_dco,
2266 &g12a_gp0_pll_dco,
2267 &g12a_hifi_pll_dco,
2268 &g12a_fclk_div2,
2269 &g12a_fclk_div3,
2270 &g12a_fclk_div4,
2271 &g12a_fclk_div5,
2272 &g12a_fclk_div7,
2273 &g12a_fclk_div2p5,
2274 &g12a_dma,
2275 &g12a_efuse,
2276 &g12a_rom_boot,
2277 &g12a_reset_sec,
2278 &g12a_sec_ahb_apb3,
2279 &g12a_vpu_0_sel,
2280 &g12a_vpu_0_div,
2281 &g12a_vpu_0,
2282 &g12a_vpu_1_sel,
2283 &g12a_vpu_1_div,
2284 &g12a_vpu_1,
2285 &g12a_vpu,
2286 &g12a_vapb_0_sel,
2287 &g12a_vapb_0_div,
2288 &g12a_vapb_0,
2289 &g12a_vapb_1_sel,
2290 &g12a_vapb_1_div,
2291 &g12a_vapb_1,
2292 &g12a_vapb_sel,
2293 &g12a_vapb,
2294 &g12a_hdmi_pll_dco,
2295 &g12a_hdmi_pll_od,
2296 &g12a_hdmi_pll_od2,
2297 &g12a_hdmi_pll,
2298 &g12a_vid_pll_div,
2299 &g12a_vid_pll_sel,
2300 &g12a_vid_pll,
2301 &g12a_vclk_sel,
2302 &g12a_vclk2_sel,
2303 &g12a_vclk_input,
2304 &g12a_vclk2_input,
2305 &g12a_vclk_div,
2306 &g12a_vclk2_div,
2307 &g12a_vclk,
2308 &g12a_vclk2,
2309 &g12a_vclk_div1,
2310 &g12a_vclk_div2_en,
2311 &g12a_vclk_div4_en,
2312 &g12a_vclk_div6_en,
2313 &g12a_vclk_div12_en,
2314 &g12a_vclk2_div1,
2315 &g12a_vclk2_div2_en,
2316 &g12a_vclk2_div4_en,
2317 &g12a_vclk2_div6_en,
2318 &g12a_vclk2_div12_en,
2319 &g12a_cts_enci_sel,
2320 &g12a_cts_encp_sel,
2321 &g12a_cts_vdac_sel,
2322 &g12a_hdmi_tx_sel,
2323 &g12a_cts_enci,
2324 &g12a_cts_encp,
2325 &g12a_cts_vdac,
2326 &g12a_hdmi_tx,
2327 &g12a_hdmi_sel,
2328 &g12a_hdmi_div,
2329 &g12a_hdmi,
2330 &g12a_mali_0_sel,
2331 &g12a_mali_0_div,
2332 &g12a_mali_0,
2333 &g12a_mali_1_sel,
2334 &g12a_mali_1_div,
2335 &g12a_mali_1,
2336 &g12a_mali,
2337 &g12a_mpll_50m,
2338};
2339
2340static const struct meson_eeclkc_data g12a_clkc_data = {
2341 .regmap_clks = g12a_clk_regmaps,
2342 .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
2343 .hw_onecell_data = &g12a_hw_onecell_data
2344};
2345
2346static const struct of_device_id clkc_match_table[] = {
2347 { .compatible = "amlogic,g12a-clkc", .data = &g12a_clkc_data },
2348 {}
2349};
2350
2351static struct platform_driver g12a_driver = {
2352 .probe = meson_eeclkc_probe,
2353 .driver = {
2354 .name = "g12a-clkc",
2355 .of_match_table = clkc_match_table,
2356 },
2357};
2358
2359builtin_platform_driver(g12a_driver);
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
new file mode 100644
index 000000000000..f399dfe1401c
--- /dev/null
+++ b/drivers/clk/meson/g12a.h
@@ -0,0 +1,175 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
2/*
3 * Copyright (c) 2016 Amlogic, Inc.
4 * Author: Michael Turquette <mturquette@baylibre.com>
5 *
6 * Copyright (c) 2018 Amlogic, inc.
7 * Author: Qiufang Dai <qiufang.dai@amlogic.com>
8 * Author: Jian Hu <jian.hu@amlogic.com>
9 *
10 */
11#ifndef __G12A_H
12#define __G12A_H
13
14/*
15 * Clock controller register offsets
16 *
17 * Register offsets from the data sheet must be multiplied by 4 before
18 * adding them to the base address to get the right value.
19 */
20#define HHI_MIPI_CNTL0 0x000
21#define HHI_MIPI_CNTL1 0x004
22#define HHI_MIPI_CNTL2 0x008
23#define HHI_MIPI_STS 0x00C
24#define HHI_GP0_PLL_CNTL0 0x040
25#define HHI_GP0_PLL_CNTL1 0x044
26#define HHI_GP0_PLL_CNTL2 0x048
27#define HHI_GP0_PLL_CNTL3 0x04C
28#define HHI_GP0_PLL_CNTL4 0x050
29#define HHI_GP0_PLL_CNTL5 0x054
30#define HHI_GP0_PLL_CNTL6 0x058
31#define HHI_GP0_PLL_STS 0x05C
32#define HHI_PCIE_PLL_CNTL0 0x098
33#define HHI_PCIE_PLL_CNTL1 0x09C
34#define HHI_PCIE_PLL_CNTL2 0x0A0
35#define HHI_PCIE_PLL_CNTL3 0x0A4
36#define HHI_PCIE_PLL_CNTL4 0x0A8
37#define HHI_PCIE_PLL_CNTL5 0x0AC
38#define HHI_PCIE_PLL_STS 0x0B8
39#define HHI_HIFI_PLL_CNTL0 0x0D8
40#define HHI_HIFI_PLL_CNTL1 0x0DC
41#define HHI_HIFI_PLL_CNTL2 0x0E0
42#define HHI_HIFI_PLL_CNTL3 0x0E4
43#define HHI_HIFI_PLL_CNTL4 0x0E8
44#define HHI_HIFI_PLL_CNTL5 0x0EC
45#define HHI_HIFI_PLL_CNTL6 0x0F0
46#define HHI_VIID_CLK_DIV 0x128
47#define HHI_VIID_CLK_CNTL 0x12C
48#define HHI_GCLK_MPEG0 0x140
49#define HHI_GCLK_MPEG1 0x144
50#define HHI_GCLK_MPEG2 0x148
51#define HHI_GCLK_OTHER 0x150
52#define HHI_GCLK_OTHER2 0x154
53#define HHI_VID_CLK_DIV 0x164
54#define HHI_MPEG_CLK_CNTL 0x174
55#define HHI_AUD_CLK_CNTL 0x178
56#define HHI_VID_CLK_CNTL 0x17c
57#define HHI_TS_CLK_CNTL 0x190
58#define HHI_VID_CLK_CNTL2 0x194
59#define HHI_SYS_CPU_CLK_CNTL0 0x19c
60#define HHI_VID_PLL_CLK_DIV 0x1A0
61#define HHI_MALI_CLK_CNTL 0x1b0
62#define HHI_VPU_CLKC_CNTL 0x1b4
63#define HHI_VPU_CLK_CNTL 0x1bC
64#define HHI_HDMI_CLK_CNTL 0x1CC
65#define HHI_VDEC_CLK_CNTL 0x1E0
66#define HHI_VDEC2_CLK_CNTL 0x1E4
67#define HHI_VDEC3_CLK_CNTL 0x1E8
68#define HHI_VDEC4_CLK_CNTL 0x1EC
69#define HHI_HDCP22_CLK_CNTL 0x1F0
70#define HHI_VAPBCLK_CNTL 0x1F4
71#define HHI_VPU_CLKB_CNTL 0x20C
72#define HHI_GEN_CLK_CNTL 0x228
73#define HHI_VDIN_MEAS_CLK_CNTL 0x250
74#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
75#define HHI_NAND_CLK_CNTL 0x25C
76#define HHI_SD_EMMC_CLK_CNTL 0x264
77#define HHI_MPLL_CNTL0 0x278
78#define HHI_MPLL_CNTL1 0x27C
79#define HHI_MPLL_CNTL2 0x280
80#define HHI_MPLL_CNTL3 0x284
81#define HHI_MPLL_CNTL4 0x288
82#define HHI_MPLL_CNTL5 0x28c
83#define HHI_MPLL_CNTL6 0x290
84#define HHI_MPLL_CNTL7 0x294
85#define HHI_MPLL_CNTL8 0x298
86#define HHI_FIX_PLL_CNTL0 0x2A0
87#define HHI_FIX_PLL_CNTL1 0x2A4
88#define HHI_FIX_PLL_CNTL3 0x2AC
89#define HHI_SYS_PLL_CNTL0 0x2f4
90#define HHI_SYS_PLL_CNTL1 0x2f8
91#define HHI_SYS_PLL_CNTL2 0x2fc
92#define HHI_SYS_PLL_CNTL3 0x300
93#define HHI_SYS_PLL_CNTL4 0x304
94#define HHI_SYS_PLL_CNTL5 0x308
95#define HHI_SYS_PLL_CNTL6 0x30c
96#define HHI_HDMI_PLL_CNTL0 0x320
97#define HHI_HDMI_PLL_CNTL1 0x324
98#define HHI_HDMI_PLL_CNTL2 0x328
99#define HHI_HDMI_PLL_CNTL3 0x32c
100#define HHI_HDMI_PLL_CNTL4 0x330
101#define HHI_HDMI_PLL_CNTL5 0x334
102#define HHI_HDMI_PLL_CNTL6 0x338
103#define HHI_SPICC_CLK_CNTL 0x3dc
104
105/*
106 * CLKID index values
107 *
108 * These indices are entirely contrived and do not map onto the hardware.
109 * It has now been decided to expose everything by default in the DT header:
110 * include/dt-bindings/clock/g12a-clkc.h. Only the clocks ids we don't want
111 * to expose, such as the internal muxes and dividers of composite clocks,
112 * will remain defined here.
113 */
114#define CLKID_MPEG_SEL 8
115#define CLKID_MPEG_DIV 9
116#define CLKID_SD_EMMC_A_CLK0_SEL 63
117#define CLKID_SD_EMMC_A_CLK0_DIV 64
118#define CLKID_SD_EMMC_B_CLK0_SEL 65
119#define CLKID_SD_EMMC_B_CLK0_DIV 66
120#define CLKID_SD_EMMC_C_CLK0_SEL 67
121#define CLKID_SD_EMMC_C_CLK0_DIV 68
122#define CLKID_MPLL0_DIV 69
123#define CLKID_MPLL1_DIV 70
124#define CLKID_MPLL2_DIV 71
125#define CLKID_MPLL3_DIV 72
126#define CLKID_MPLL_PREDIV 73
127#define CLKID_FCLK_DIV2_DIV 75
128#define CLKID_FCLK_DIV3_DIV 76
129#define CLKID_FCLK_DIV4_DIV 77
130#define CLKID_FCLK_DIV5_DIV 78
131#define CLKID_FCLK_DIV7_DIV 79
132#define CLKID_FCLK_DIV2P5_DIV 100
133#define CLKID_FIXED_PLL_DCO 101
134#define CLKID_SYS_PLL_DCO 102
135#define CLKID_GP0_PLL_DCO 103
136#define CLKID_HIFI_PLL_DCO 104
137#define CLKID_VPU_0_DIV 111
138#define CLKID_VPU_1_DIV 114
139#define CLKID_VAPB_0_DIV 118
140#define CLKID_VAPB_1_DIV 121
141#define CLKID_HDMI_PLL_DCO 125
142#define CLKID_HDMI_PLL_OD 126
143#define CLKID_HDMI_PLL_OD2 127
144#define CLKID_VID_PLL_SEL 130
145#define CLKID_VID_PLL_DIV 131
146#define CLKID_VCLK_SEL 132
147#define CLKID_VCLK2_SEL 133
148#define CLKID_VCLK_INPUT 134
149#define CLKID_VCLK2_INPUT 135
150#define CLKID_VCLK_DIV 136
151#define CLKID_VCLK2_DIV 137
152#define CLKID_VCLK_DIV2_EN 140
153#define CLKID_VCLK_DIV4_EN 141
154#define CLKID_VCLK_DIV6_EN 142
155#define CLKID_VCLK_DIV12_EN 143
156#define CLKID_VCLK2_DIV2_EN 144
157#define CLKID_VCLK2_DIV4_EN 145
158#define CLKID_VCLK2_DIV6_EN 146
159#define CLKID_VCLK2_DIV12_EN 147
160#define CLKID_CTS_ENCI_SEL 158
161#define CLKID_CTS_ENCP_SEL 159
162#define CLKID_CTS_VDAC_SEL 160
163#define CLKID_HDMI_TX_SEL 161
164#define CLKID_HDMI_SEL 166
165#define CLKID_HDMI_DIV 167
166#define CLKID_MALI_0_DIV 170
167#define CLKID_MALI_1_DIV 173
168#define CLKID_MPLL_5OM_DIV 176
169
170#define NR_CLKS 178
171
172/* include the CLKIDs that have been made part of the DT binding */
173#include <dt-bindings/clock/g12a-clkc.h>
174
175#endif /* __G12A_H */
diff --git a/drivers/clk/meson/gxbb-aoclk-32k.c b/drivers/clk/meson/gxbb-aoclk-32k.c
deleted file mode 100644
index 680467141a1d..000000000000
--- a/drivers/clk/meson/gxbb-aoclk-32k.c
+++ /dev/null
@@ -1,193 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (c) 2017 BayLibre, SAS.
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 */
6
7#include <linux/clk-provider.h>
8#include <linux/bitfield.h>
9#include <linux/regmap.h>
10#include "gxbb-aoclk.h"
11
12/*
13 * The AO Domain embeds a dual/divider to generate a more precise
14 * 32,768KHz clock for low-power suspend mode and CEC.
15 * ______ ______
16 * | | | |
17 * ______ | Div1 |-| Cnt1 | ______
18 * | | /|______| |______|\ | |
19 * Xtal-->| Gate |---| ______ ______ X-X--| Gate |-->
20 * |______| | \| | | |/ | |______|
21 * | | Div2 |-| Cnt2 | |
22 * | |______| |______| |
23 * |_______________________|
24 *
25 * The dividing can be switched to single or dual, with a counter
26 * for each divider to set when the switching is done.
27 * The entire dividing mechanism can be also bypassed.
28 */
29
30#define CLK_CNTL0_N1_MASK GENMASK(11, 0)
31#define CLK_CNTL0_N2_MASK GENMASK(23, 12)
32#define CLK_CNTL0_DUALDIV_EN BIT(28)
33#define CLK_CNTL0_OUT_GATE_EN BIT(30)
34#define CLK_CNTL0_IN_GATE_EN BIT(31)
35
36#define CLK_CNTL1_M1_MASK GENMASK(11, 0)
37#define CLK_CNTL1_M2_MASK GENMASK(23, 12)
38#define CLK_CNTL1_BYPASS_EN BIT(24)
39#define CLK_CNTL1_SELECT_OSC BIT(27)
40
41#define PWR_CNTL_ALT_32K_SEL GENMASK(13, 10)
42
43struct cec_32k_freq_table {
44 unsigned long parent_rate;
45 unsigned long target_rate;
46 bool dualdiv;
47 unsigned int n1;
48 unsigned int n2;
49 unsigned int m1;
50 unsigned int m2;
51};
52
53static const struct cec_32k_freq_table aoclk_cec_32k_table[] = {
54 [0] = {
55 .parent_rate = 24000000,
56 .target_rate = 32768,
57 .dualdiv = true,
58 .n1 = 733,
59 .n2 = 732,
60 .m1 = 8,
61 .m2 = 11,
62 },
63};
64
65/*
66 * If CLK_CNTL0_DUALDIV_EN == 0
67 * - will use N1 divider only
68 * If CLK_CNTL0_DUALDIV_EN == 1
69 * - hold M1 cycles of N1 divider then changes to N2
70 * - hold M2 cycles of N2 divider then changes to N1
71 * Then we can get more accurate division.
72 */
73static unsigned long aoclk_cec_32k_recalc_rate(struct clk_hw *hw,
74 unsigned long parent_rate)
75{
76 struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
77 unsigned long n1;
78 u32 reg0, reg1;
79
80 regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, &reg0);
81 regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, &reg1);
82
83 if (reg1 & CLK_CNTL1_BYPASS_EN)
84 return parent_rate;
85
86 if (reg0 & CLK_CNTL0_DUALDIV_EN) {
87 unsigned long n2, m1, m2, f1, f2, p1, p2;
88
89 n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
90 n2 = FIELD_GET(CLK_CNTL0_N2_MASK, reg0) + 1;
91
92 m1 = FIELD_GET(CLK_CNTL1_M1_MASK, reg1) + 1;
93 m2 = FIELD_GET(CLK_CNTL1_M2_MASK, reg1) + 1;
94
95 f1 = DIV_ROUND_CLOSEST(parent_rate, n1);
96 f2 = DIV_ROUND_CLOSEST(parent_rate, n2);
97
98 p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2));
99 p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2));
100
101 return DIV_ROUND_UP(100000000, p1 + p2);
102 }
103
104 n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
105
106 return DIV_ROUND_CLOSEST(parent_rate, n1);
107}
108
109static const struct cec_32k_freq_table *find_cec_32k_freq(unsigned long rate,
110 unsigned long prate)
111{
112 int i;
113
114 for (i = 0 ; i < ARRAY_SIZE(aoclk_cec_32k_table) ; ++i)
115 if (aoclk_cec_32k_table[i].parent_rate == prate &&
116 aoclk_cec_32k_table[i].target_rate == rate)
117 return &aoclk_cec_32k_table[i];
118
119 return NULL;
120}
121
122static long aoclk_cec_32k_round_rate(struct clk_hw *hw, unsigned long rate,
123 unsigned long *prate)
124{
125 const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
126 *prate);
127
128 /* If invalid return first one */
129 if (!freq)
130 return aoclk_cec_32k_table[0].target_rate;
131
132 return freq->target_rate;
133}
134
135/*
136 * From the Amlogic init procedure, the IN and OUT gates needs to be handled
137 * in the init procedure to avoid any glitches.
138 */
139
140static int aoclk_cec_32k_set_rate(struct clk_hw *hw, unsigned long rate,
141 unsigned long parent_rate)
142{
143 const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
144 parent_rate);
145 struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
146 u32 reg = 0;
147
148 if (!freq)
149 return -EINVAL;
150
151 /* Disable clock */
152 regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
153 CLK_CNTL0_IN_GATE_EN | CLK_CNTL0_OUT_GATE_EN, 0);
154
155 reg = FIELD_PREP(CLK_CNTL0_N1_MASK, freq->n1 - 1);
156 if (freq->dualdiv)
157 reg |= CLK_CNTL0_DUALDIV_EN |
158 FIELD_PREP(CLK_CNTL0_N2_MASK, freq->n2 - 1);
159
160 regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, reg);
161
162 reg = FIELD_PREP(CLK_CNTL1_M1_MASK, freq->m1 - 1);
163 if (freq->dualdiv)
164 reg |= FIELD_PREP(CLK_CNTL1_M2_MASK, freq->m2 - 1);
165
166 regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, reg);
167
168 /* Enable clock */
169 regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
170 CLK_CNTL0_IN_GATE_EN, CLK_CNTL0_IN_GATE_EN);
171
172 udelay(200);
173
174 regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
175 CLK_CNTL0_OUT_GATE_EN, CLK_CNTL0_OUT_GATE_EN);
176
177 regmap_update_bits(cec_32k->regmap, AO_CRT_CLK_CNTL1,
178 CLK_CNTL1_SELECT_OSC, CLK_CNTL1_SELECT_OSC);
179
180 /* Select 32k from XTAL */
181 regmap_update_bits(cec_32k->regmap,
182 AO_RTI_PWR_CNTL_REG0,
183 PWR_CNTL_ALT_32K_SEL,
184 FIELD_PREP(PWR_CNTL_ALT_32K_SEL, 4));
185
186 return 0;
187}
188
189const struct clk_ops meson_aoclk_cec_32k_ops = {
190 .recalc_rate = aoclk_cec_32k_recalc_rate,
191 .round_rate = aoclk_cec_32k_round_rate,
192 .set_rate = aoclk_cec_32k_set_rate,
193};
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 42ed61d3c3fb..449f6ac189d8 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -5,10 +5,23 @@
5 */ 5 */
6#include <linux/platform_device.h> 6#include <linux/platform_device.h>
7#include <linux/mfd/syscon.h> 7#include <linux/mfd/syscon.h>
8#include "clk-regmap.h"
9#include "meson-aoclk.h" 8#include "meson-aoclk.h"
10#include "gxbb-aoclk.h" 9#include "gxbb-aoclk.h"
11 10
11#include "clk-regmap.h"
12#include "clk-dualdiv.h"
13
14#define IN_PREFIX "ao-in-"
15
16/* AO Configuration Clock registers offsets */
17#define AO_RTI_PWR_CNTL_REG1 0x0c
18#define AO_RTI_PWR_CNTL_REG0 0x10
19#define AO_RTI_GEN_CNTL_REG0 0x40
20#define AO_OSCIN_CNTL 0x58
21#define AO_CRT_CLK_CNTL1 0x68
22#define AO_RTC_ALT_CLK_CNTL0 0x94
23#define AO_RTC_ALT_CLK_CNTL1 0x98
24
12#define GXBB_AO_GATE(_name, _bit) \ 25#define GXBB_AO_GATE(_name, _bit) \
13static struct clk_regmap _name##_ao = { \ 26static struct clk_regmap _name##_ao = { \
14 .data = &(struct clk_regmap_gate_data) { \ 27 .data = &(struct clk_regmap_gate_data) { \
@@ -18,7 +31,7 @@ static struct clk_regmap _name##_ao = { \
18 .hw.init = &(struct clk_init_data) { \ 31 .hw.init = &(struct clk_init_data) { \
19 .name = #_name "_ao", \ 32 .name = #_name "_ao", \
20 .ops = &clk_regmap_gate_ops, \ 33 .ops = &clk_regmap_gate_ops, \
21 .parent_names = (const char *[]){ "clk81" }, \ 34 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
22 .num_parents = 1, \ 35 .num_parents = 1, \
23 .flags = CLK_IGNORE_UNUSED, \ 36 .flags = CLK_IGNORE_UNUSED, \
24 }, \ 37 }, \
@@ -31,13 +44,174 @@ GXBB_AO_GATE(uart1, 3);
31GXBB_AO_GATE(uart2, 5); 44GXBB_AO_GATE(uart2, 5);
32GXBB_AO_GATE(ir_blaster, 6); 45GXBB_AO_GATE(ir_blaster, 6);
33 46
34static struct aoclk_cec_32k cec_32k_ao = { 47static struct clk_regmap ao_cts_oscin = {
35 .hw.init = &(struct clk_init_data) { 48 .data = &(struct clk_regmap_gate_data){
36 .name = "cec_32k_ao", 49 .offset = AO_RTI_PWR_CNTL_REG0,
37 .ops = &meson_aoclk_cec_32k_ops, 50 .bit_idx = 6,
38 .parent_names = (const char *[]){ "xtal" }, 51 },
52 .hw.init = &(struct clk_init_data){
53 .name = "ao_cts_oscin",
54 .ops = &clk_regmap_gate_ro_ops,
55 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
56 .num_parents = 1,
57 },
58};
59
60static struct clk_regmap ao_32k_pre = {
61 .data = &(struct clk_regmap_gate_data){
62 .offset = AO_RTC_ALT_CLK_CNTL0,
63 .bit_idx = 31,
64 },
65 .hw.init = &(struct clk_init_data){
66 .name = "ao_32k_pre",
67 .ops = &clk_regmap_gate_ops,
68 .parent_names = (const char *[]){ "ao_cts_oscin" },
69 .num_parents = 1,
70 },
71};
72
73static const struct meson_clk_dualdiv_param gxbb_32k_div_table[] = {
74 {
75 .dual = 1,
76 .n1 = 733,
77 .m1 = 8,
78 .n2 = 732,
79 .m2 = 11,
80 }, {}
81};
82
83static struct clk_regmap ao_32k_div = {
84 .data = &(struct meson_clk_dualdiv_data){
85 .n1 = {
86 .reg_off = AO_RTC_ALT_CLK_CNTL0,
87 .shift = 0,
88 .width = 12,
89 },
90 .n2 = {
91 .reg_off = AO_RTC_ALT_CLK_CNTL0,
92 .shift = 12,
93 .width = 12,
94 },
95 .m1 = {
96 .reg_off = AO_RTC_ALT_CLK_CNTL1,
97 .shift = 0,
98 .width = 12,
99 },
100 .m2 = {
101 .reg_off = AO_RTC_ALT_CLK_CNTL1,
102 .shift = 12,
103 .width = 12,
104 },
105 .dual = {
106 .reg_off = AO_RTC_ALT_CLK_CNTL0,
107 .shift = 28,
108 .width = 1,
109 },
110 .table = gxbb_32k_div_table,
111 },
112 .hw.init = &(struct clk_init_data){
113 .name = "ao_32k_div",
114 .ops = &meson_clk_dualdiv_ops,
115 .parent_names = (const char *[]){ "ao_32k_pre" },
116 .num_parents = 1,
117 },
118};
119
120static struct clk_regmap ao_32k_sel = {
121 .data = &(struct clk_regmap_mux_data) {
122 .offset = AO_RTC_ALT_CLK_CNTL1,
123 .mask = 0x1,
124 .shift = 24,
125 .flags = CLK_MUX_ROUND_CLOSEST,
126 },
127 .hw.init = &(struct clk_init_data){
128 .name = "ao_32k_sel",
129 .ops = &clk_regmap_mux_ops,
130 .parent_names = (const char *[]){ "ao_32k_div",
131 "ao_32k_pre" },
132 .num_parents = 2,
133 .flags = CLK_SET_RATE_PARENT,
134 },
135};
136
137static struct clk_regmap ao_32k = {
138 .data = &(struct clk_regmap_gate_data){
139 .offset = AO_RTC_ALT_CLK_CNTL0,
140 .bit_idx = 30,
141 },
142 .hw.init = &(struct clk_init_data){
143 .name = "ao_32k",
144 .ops = &clk_regmap_gate_ops,
145 .parent_names = (const char *[]){ "ao_32k_sel" },
39 .num_parents = 1, 146 .num_parents = 1,
40 .flags = CLK_IGNORE_UNUSED, 147 .flags = CLK_SET_RATE_PARENT,
148 },
149};
150
151static struct clk_regmap ao_cts_rtc_oscin = {
152 .data = &(struct clk_regmap_mux_data) {
153 .offset = AO_RTI_PWR_CNTL_REG0,
154 .mask = 0x7,
155 .shift = 10,
156 .table = (u32[]){ 1, 2, 3, 4 },
157 .flags = CLK_MUX_ROUND_CLOSEST,
158 },
159 .hw.init = &(struct clk_init_data){
160 .name = "ao_cts_rtc_oscin",
161 .ops = &clk_regmap_mux_ops,
162 .parent_names = (const char *[]){ IN_PREFIX "ext-32k-0",
163 IN_PREFIX "ext-32k-1",
164 IN_PREFIX "ext-32k-2",
165 "ao_32k" },
166 .num_parents = 4,
167 .flags = CLK_SET_RATE_PARENT,
168 },
169};
170
171static struct clk_regmap ao_clk81 = {
172 .data = &(struct clk_regmap_mux_data) {
173 .offset = AO_RTI_PWR_CNTL_REG0,
174 .mask = 0x1,
175 .shift = 0,
176 .flags = CLK_MUX_ROUND_CLOSEST,
177 },
178 .hw.init = &(struct clk_init_data){
179 .name = "ao_clk81",
180 .ops = &clk_regmap_mux_ro_ops,
181 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
182 "ao_cts_rtc_oscin" },
183 .num_parents = 2,
184 .flags = CLK_SET_RATE_PARENT,
185 },
186};
187
188static struct clk_regmap ao_cts_cec = {
189 .data = &(struct clk_regmap_mux_data) {
190 .offset = AO_CRT_CLK_CNTL1,
191 .mask = 0x1,
192 .shift = 27,
193 .flags = CLK_MUX_ROUND_CLOSEST,
194 },
195 .hw.init = &(struct clk_init_data){
196 .name = "ao_cts_cec",
197 .ops = &clk_regmap_mux_ops,
198 /*
199 * FIXME: The 'fixme' parent obviously does not exist.
200 *
201 * ATM, CCF won't call get_parent() if num_parents is 1. It
202 * does not allow NULL as a parent name either.
203 *
204 * On this particular mux, we only know the input #1 parent
205 * but, on boot, unknown input #0 is set, so it is critical
206 * to call .get_parent() on it
207 *
208 * Until CCF gets fixed, adding this fake parent that won't
209 * ever be registered should work around the problem
210 */
211 .parent_names = (const char *[]){ "fixme",
212 "ao_cts_rtc_oscin" },
213 .num_parents = 2,
214 .flags = CLK_SET_RATE_PARENT,
41 }, 215 },
42}; 216};
43 217
@@ -50,13 +224,21 @@ static const unsigned int gxbb_aoclk_reset[] = {
50 [RESET_AO_IR_BLASTER] = 23, 224 [RESET_AO_IR_BLASTER] = 23,
51}; 225};
52 226
53static struct clk_regmap *gxbb_aoclk_gate[] = { 227static struct clk_regmap *gxbb_aoclk[] = {
54 [CLKID_AO_REMOTE] = &remote_ao, 228 &remote_ao,
55 [CLKID_AO_I2C_MASTER] = &i2c_master_ao, 229 &i2c_master_ao,
56 [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao, 230 &i2c_slave_ao,
57 [CLKID_AO_UART1] = &uart1_ao, 231 &uart1_ao,
58 [CLKID_AO_UART2] = &uart2_ao, 232 &uart2_ao,
59 [CLKID_AO_IR_BLASTER] = &ir_blaster_ao, 233 &ir_blaster_ao,
234 &ao_cts_oscin,
235 &ao_32k_pre,
236 &ao_32k_div,
237 &ao_32k_sel,
238 &ao_32k,
239 &ao_cts_rtc_oscin,
240 &ao_clk81,
241 &ao_cts_cec,
60}; 242};
61 243
62static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = { 244static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
@@ -67,52 +249,38 @@ static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
67 [CLKID_AO_UART1] = &uart1_ao.hw, 249 [CLKID_AO_UART1] = &uart1_ao.hw,
68 [CLKID_AO_UART2] = &uart2_ao.hw, 250 [CLKID_AO_UART2] = &uart2_ao.hw,
69 [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw, 251 [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
70 [CLKID_AO_CEC_32K] = &cec_32k_ao.hw, 252 [CLKID_AO_CEC_32K] = &ao_cts_cec.hw,
253 [CLKID_AO_CTS_OSCIN] = &ao_cts_oscin.hw,
254 [CLKID_AO_32K_PRE] = &ao_32k_pre.hw,
255 [CLKID_AO_32K_DIV] = &ao_32k_div.hw,
256 [CLKID_AO_32K_SEL] = &ao_32k_sel.hw,
257 [CLKID_AO_32K] = &ao_32k.hw,
258 [CLKID_AO_CTS_RTC_OSCIN] = &ao_cts_rtc_oscin.hw,
259 [CLKID_AO_CLK81] = &ao_clk81.hw,
71 }, 260 },
72 .num = NR_CLKS, 261 .num = NR_CLKS,
73}; 262};
74 263
75static int gxbb_register_cec_ao_32k(struct platform_device *pdev) 264static const struct meson_aoclk_input gxbb_aoclk_inputs[] = {
76{ 265 { .name = "xtal", .required = true, },
77 struct device *dev = &pdev->dev; 266 { .name = "mpeg-clk", .required = true, },
78 struct regmap *regmap; 267 { .name = "ext-32k-0", .required = false, },
79 int ret; 268 { .name = "ext-32k-1", .required = false, },
80 269 { .name = "ext-32k-2", .required = false, },
81 regmap = syscon_node_to_regmap(of_get_parent(dev->of_node)); 270};
82 if (IS_ERR(regmap)) {
83 dev_err(dev, "failed to get regmap\n");
84 return PTR_ERR(regmap);
85 }
86
87 /* Specific clocks */
88 cec_32k_ao.regmap = regmap;
89 ret = devm_clk_hw_register(dev, &cec_32k_ao.hw);
90 if (ret) {
91 dev_err(&pdev->dev, "clk cec_32k_ao register failed.\n");
92 return ret;
93 }
94
95 return 0;
96}
97 271
98static const struct meson_aoclk_data gxbb_aoclkc_data = { 272static const struct meson_aoclk_data gxbb_aoclkc_data = {
99 .reset_reg = AO_RTI_GEN_CNTL_REG0, 273 .reset_reg = AO_RTI_GEN_CNTL_REG0,
100 .num_reset = ARRAY_SIZE(gxbb_aoclk_reset), 274 .num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
101 .reset = gxbb_aoclk_reset, 275 .reset = gxbb_aoclk_reset,
102 .num_clks = ARRAY_SIZE(gxbb_aoclk_gate), 276 .num_clks = ARRAY_SIZE(gxbb_aoclk),
103 .clks = gxbb_aoclk_gate, 277 .clks = gxbb_aoclk,
104 .hw_data = &gxbb_aoclk_onecell_data, 278 .hw_data = &gxbb_aoclk_onecell_data,
279 .inputs = gxbb_aoclk_inputs,
280 .num_inputs = ARRAY_SIZE(gxbb_aoclk_inputs),
281 .input_prefix = IN_PREFIX,
105}; 282};
106 283
107static int gxbb_aoclkc_probe(struct platform_device *pdev)
108{
109 int ret = gxbb_register_cec_ao_32k(pdev);
110 if (ret)
111 return ret;
112
113 return meson_aoclkc_probe(pdev);
114}
115
116static const struct of_device_id gxbb_aoclkc_match_table[] = { 284static const struct of_device_id gxbb_aoclkc_match_table[] = {
117 { 285 {
118 .compatible = "amlogic,meson-gx-aoclkc", 286 .compatible = "amlogic,meson-gx-aoclkc",
@@ -122,7 +290,7 @@ static const struct of_device_id gxbb_aoclkc_match_table[] = {
122}; 290};
123 291
124static struct platform_driver gxbb_aoclkc_driver = { 292static struct platform_driver gxbb_aoclkc_driver = {
125 .probe = gxbb_aoclkc_probe, 293 .probe = meson_aoclkc_probe,
126 .driver = { 294 .driver = {
127 .name = "gxbb-aoclkc", 295 .name = "gxbb-aoclkc",
128 .of_match_table = gxbb_aoclkc_match_table, 296 .of_match_table = gxbb_aoclkc_match_table,
diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h
index c514493d989a..1db16f9b37d4 100644
--- a/drivers/clk/meson/gxbb-aoclk.h
+++ b/drivers/clk/meson/gxbb-aoclk.h
@@ -7,25 +7,7 @@
7#ifndef __GXBB_AOCLKC_H 7#ifndef __GXBB_AOCLKC_H
8#define __GXBB_AOCLKC_H 8#define __GXBB_AOCLKC_H
9 9
10#define NR_CLKS 7 10#define NR_CLKS 14
11
12/* AO Configuration Clock registers offsets */
13#define AO_RTI_PWR_CNTL_REG1 0x0c
14#define AO_RTI_PWR_CNTL_REG0 0x10
15#define AO_RTI_GEN_CNTL_REG0 0x40
16#define AO_OSCIN_CNTL 0x58
17#define AO_CRT_CLK_CNTL1 0x68
18#define AO_RTC_ALT_CLK_CNTL0 0x94
19#define AO_RTC_ALT_CLK_CNTL1 0x98
20
21struct aoclk_cec_32k {
22 struct clk_hw hw;
23 struct regmap *regmap;
24};
25
26#define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw)
27
28extern const struct clk_ops meson_aoclk_cec_32k_ops;
29 11
30#include <dt-bindings/clock/gxbb-aoclkc.h> 12#include <dt-bindings/clock/gxbb-aoclkc.h>
31#include <dt-bindings/reset/gxbb-aoclkc.h> 13#include <dt-bindings/reset/gxbb-aoclkc.h>
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 65f2599e5243..04df2e208ed6 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -4,17 +4,20 @@
4 * Michael Turquette <mturquette@baylibre.com> 4 * Michael Turquette <mturquette@baylibre.com>
5 */ 5 */
6 6
7#include <linux/clk.h>
8#include <linux/clk-provider.h> 7#include <linux/clk-provider.h>
9#include <linux/init.h> 8#include <linux/init.h>
10#include <linux/of_device.h> 9#include <linux/of_device.h>
11#include <linux/mfd/syscon.h>
12#include <linux/platform_device.h> 10#include <linux/platform_device.h>
13#include <linux/regmap.h>
14 11
15#include "clkc.h"
16#include "gxbb.h" 12#include "gxbb.h"
13#include "clk-input.h"
17#include "clk-regmap.h" 14#include "clk-regmap.h"
15#include "clk-pll.h"
16#include "clk-mpll.h"
17#include "meson-eeclk.h"
18#include "vid-pll-div.h"
19
20#define IN_PREFIX "ee-in-"
18 21
19static DEFINE_SPINLOCK(meson_clk_lock); 22static DEFINE_SPINLOCK(meson_clk_lock);
20 23
@@ -118,7 +121,7 @@ static struct clk_regmap gxbb_fixed_pll_dco = {
118 .hw.init = &(struct clk_init_data){ 121 .hw.init = &(struct clk_init_data){
119 .name = "fixed_pll_dco", 122 .name = "fixed_pll_dco",
120 .ops = &meson_clk_pll_ro_ops, 123 .ops = &meson_clk_pll_ro_ops,
121 .parent_names = (const char *[]){ "xtal" }, 124 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
122 .num_parents = 1, 125 .num_parents = 1,
123 }, 126 },
124}; 127};
@@ -148,7 +151,7 @@ static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = {
148 .hw.init = &(struct clk_init_data){ 151 .hw.init = &(struct clk_init_data){
149 .name = "hdmi_pll_pre_mult", 152 .name = "hdmi_pll_pre_mult",
150 .ops = &clk_fixed_factor_ops, 153 .ops = &clk_fixed_factor_ops,
151 .parent_names = (const char *[]){ "xtal" }, 154 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
152 .num_parents = 1, 155 .num_parents = 1,
153 }, 156 },
154}; 157};
@@ -241,7 +244,7 @@ static struct clk_regmap gxl_hdmi_pll_dco = {
241 .hw.init = &(struct clk_init_data){ 244 .hw.init = &(struct clk_init_data){
242 .name = "hdmi_pll_dco", 245 .name = "hdmi_pll_dco",
243 .ops = &meson_clk_pll_ro_ops, 246 .ops = &meson_clk_pll_ro_ops,
244 .parent_names = (const char *[]){ "xtal" }, 247 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
245 .num_parents = 1, 248 .num_parents = 1,
246 /* 249 /*
247 * Display directly handle hdmi pll registers ATM, we need 250 * Display directly handle hdmi pll registers ATM, we need
@@ -378,7 +381,7 @@ static struct clk_regmap gxbb_sys_pll_dco = {
378 .hw.init = &(struct clk_init_data){ 381 .hw.init = &(struct clk_init_data){
379 .name = "sys_pll_dco", 382 .name = "sys_pll_dco",
380 .ops = &meson_clk_pll_ro_ops, 383 .ops = &meson_clk_pll_ro_ops,
381 .parent_names = (const char *[]){ "xtal" }, 384 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
382 .num_parents = 1, 385 .num_parents = 1,
383 }, 386 },
384}; 387};
@@ -439,7 +442,7 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
439 .hw.init = &(struct clk_init_data){ 442 .hw.init = &(struct clk_init_data){
440 .name = "gp0_pll_dco", 443 .name = "gp0_pll_dco",
441 .ops = &meson_clk_pll_ops, 444 .ops = &meson_clk_pll_ops,
442 .parent_names = (const char *[]){ "xtal" }, 445 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
443 .num_parents = 1, 446 .num_parents = 1,
444 }, 447 },
445}; 448};
@@ -491,7 +494,7 @@ static struct clk_regmap gxl_gp0_pll_dco = {
491 .hw.init = &(struct clk_init_data){ 494 .hw.init = &(struct clk_init_data){
492 .name = "gp0_pll_dco", 495 .name = "gp0_pll_dco",
493 .ops = &meson_clk_pll_ops, 496 .ops = &meson_clk_pll_ops,
494 .parent_names = (const char *[]){ "xtal" }, 497 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
495 .num_parents = 1, 498 .num_parents = 1,
496 }, 499 },
497}; 500};
@@ -789,7 +792,7 @@ static struct clk_regmap gxbb_mpll2 = {
789 792
790static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 }; 793static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
791static const char * const clk81_parent_names[] = { 794static const char * const clk81_parent_names[] = {
792 "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4", 795 IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
793 "fclk_div3", "fclk_div5" 796 "fclk_div3", "fclk_div5"
794}; 797};
795 798
@@ -852,7 +855,7 @@ static struct clk_regmap gxbb_sar_adc_clk_sel = {
852 .name = "sar_adc_clk_sel", 855 .name = "sar_adc_clk_sel",
853 .ops = &clk_regmap_mux_ops, 856 .ops = &clk_regmap_mux_ops,
854 /* NOTE: The datasheet doesn't list the parents for bit 10 */ 857 /* NOTE: The datasheet doesn't list the parents for bit 10 */
855 .parent_names = (const char *[]){ "xtal", "clk81", }, 858 .parent_names = (const char *[]){ IN_PREFIX "xtal", "clk81", },
856 .num_parents = 2, 859 .num_parents = 2,
857 }, 860 },
858}; 861};
@@ -891,7 +894,7 @@ static struct clk_regmap gxbb_sar_adc_clk = {
891 */ 894 */
892 895
893static const char * const gxbb_mali_0_1_parent_names[] = { 896static const char * const gxbb_mali_0_1_parent_names[] = {
894 "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7", 897 IN_PREFIX "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
895 "fclk_div4", "fclk_div3", "fclk_div5" 898 "fclk_div4", "fclk_div3", "fclk_div5"
896}; 899};
897 900
@@ -1153,7 +1156,7 @@ static struct clk_regmap gxbb_32k_clk = {
1153}; 1156};
1154 1157
1155static const char * const gxbb_32k_clk_parent_names[] = { 1158static const char * const gxbb_32k_clk_parent_names[] = {
1156 "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5" 1159 IN_PREFIX "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
1157}; 1160};
1158 1161
1159static struct clk_regmap gxbb_32k_clk_sel = { 1162static struct clk_regmap gxbb_32k_clk_sel = {
@@ -1172,7 +1175,7 @@ static struct clk_regmap gxbb_32k_clk_sel = {
1172}; 1175};
1173 1176
1174static const char * const gxbb_sd_emmc_clk0_parent_names[] = { 1177static const char * const gxbb_sd_emmc_clk0_parent_names[] = {
1175 "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", 1178 IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
1176 1179
1177 /* 1180 /*
1178 * Following these parent clocks, we should also have had mpll2, mpll3 1181 * Following these parent clocks, we should also have had mpll2, mpll3
@@ -2138,7 +2141,7 @@ static struct clk_regmap gxbb_hdmi_tx = {
2138/* HDMI Clocks */ 2141/* HDMI Clocks */
2139 2142
2140static const char * const gxbb_hdmi_parent_names[] = { 2143static const char * const gxbb_hdmi_parent_names[] = {
2141 "xtal", "fclk_div4", "fclk_div3", "fclk_div5" 2144 IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
2142}; 2145};
2143 2146
2144static struct clk_regmap gxbb_hdmi_sel = { 2147static struct clk_regmap gxbb_hdmi_sel = {
@@ -2285,7 +2288,7 @@ static struct clk_regmap gxbb_vdec_hevc = {
2285static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8, 2288static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
2286 9, 10, 11, 13, 14, }; 2289 9, 10, 11, 13, 14, };
2287static const char * const gen_clk_parent_names[] = { 2290static const char * const gen_clk_parent_names[] = {
2288 "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2", 2291 IN_PREFIX "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
2289 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll", 2292 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
2290}; 2293};
2291 2294
@@ -2854,6 +2857,192 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
2854}; 2857};
2855 2858
2856static struct clk_regmap *const gxbb_clk_regmaps[] = { 2859static struct clk_regmap *const gxbb_clk_regmaps[] = {
2860 &gxbb_clk81,
2861 &gxbb_ddr,
2862 &gxbb_dos,
2863 &gxbb_isa,
2864 &gxbb_pl301,
2865 &gxbb_periphs,
2866 &gxbb_spicc,
2867 &gxbb_i2c,
2868 &gxbb_sar_adc,
2869 &gxbb_smart_card,
2870 &gxbb_rng0,
2871 &gxbb_uart0,
2872 &gxbb_sdhc,
2873 &gxbb_stream,
2874 &gxbb_async_fifo,
2875 &gxbb_sdio,
2876 &gxbb_abuf,
2877 &gxbb_hiu_iface,
2878 &gxbb_assist_misc,
2879 &gxbb_spi,
2880 &gxbb_i2s_spdif,
2881 &gxbb_eth,
2882 &gxbb_demux,
2883 &gxbb_aiu_glue,
2884 &gxbb_iec958,
2885 &gxbb_i2s_out,
2886 &gxbb_amclk,
2887 &gxbb_aififo2,
2888 &gxbb_mixer,
2889 &gxbb_mixer_iface,
2890 &gxbb_adc,
2891 &gxbb_blkmv,
2892 &gxbb_aiu,
2893 &gxbb_uart1,
2894 &gxbb_g2d,
2895 &gxbb_usb0,
2896 &gxbb_usb1,
2897 &gxbb_reset,
2898 &gxbb_nand,
2899 &gxbb_dos_parser,
2900 &gxbb_usb,
2901 &gxbb_vdin1,
2902 &gxbb_ahb_arb0,
2903 &gxbb_efuse,
2904 &gxbb_boot_rom,
2905 &gxbb_ahb_data_bus,
2906 &gxbb_ahb_ctrl_bus,
2907 &gxbb_hdmi_intr_sync,
2908 &gxbb_hdmi_pclk,
2909 &gxbb_usb1_ddr_bridge,
2910 &gxbb_usb0_ddr_bridge,
2911 &gxbb_mmc_pclk,
2912 &gxbb_dvin,
2913 &gxbb_uart2,
2914 &gxbb_sana,
2915 &gxbb_vpu_intr,
2916 &gxbb_sec_ahb_ahb3_bridge,
2917 &gxbb_clk81_a53,
2918 &gxbb_vclk2_venci0,
2919 &gxbb_vclk2_venci1,
2920 &gxbb_vclk2_vencp0,
2921 &gxbb_vclk2_vencp1,
2922 &gxbb_gclk_venci_int0,
2923 &gxbb_gclk_vencp_int,
2924 &gxbb_dac_clk,
2925 &gxbb_aoclk_gate,
2926 &gxbb_iec958_gate,
2927 &gxbb_enc480p,
2928 &gxbb_rng1,
2929 &gxbb_gclk_venci_int1,
2930 &gxbb_vclk2_venclmcc,
2931 &gxbb_vclk2_vencl,
2932 &gxbb_vclk_other,
2933 &gxbb_edp,
2934 &gxbb_ao_media_cpu,
2935 &gxbb_ao_ahb_sram,
2936 &gxbb_ao_ahb_bus,
2937 &gxbb_ao_iface,
2938 &gxbb_ao_i2c,
2939 &gxbb_emmc_a,
2940 &gxbb_emmc_b,
2941 &gxbb_emmc_c,
2942 &gxbb_sar_adc_clk,
2943 &gxbb_mali_0,
2944 &gxbb_mali_1,
2945 &gxbb_cts_amclk,
2946 &gxbb_cts_mclk_i958,
2947 &gxbb_32k_clk,
2948 &gxbb_sd_emmc_a_clk0,
2949 &gxbb_sd_emmc_b_clk0,
2950 &gxbb_sd_emmc_c_clk0,
2951 &gxbb_vpu_0,
2952 &gxbb_vpu_1,
2953 &gxbb_vapb_0,
2954 &gxbb_vapb_1,
2955 &gxbb_vapb,
2956 &gxbb_mpeg_clk_div,
2957 &gxbb_sar_adc_clk_div,
2958 &gxbb_mali_0_div,
2959 &gxbb_mali_1_div,
2960 &gxbb_cts_mclk_i958_div,
2961 &gxbb_32k_clk_div,
2962 &gxbb_sd_emmc_a_clk0_div,
2963 &gxbb_sd_emmc_b_clk0_div,
2964 &gxbb_sd_emmc_c_clk0_div,
2965 &gxbb_vpu_0_div,
2966 &gxbb_vpu_1_div,
2967 &gxbb_vapb_0_div,
2968 &gxbb_vapb_1_div,
2969 &gxbb_mpeg_clk_sel,
2970 &gxbb_sar_adc_clk_sel,
2971 &gxbb_mali_0_sel,
2972 &gxbb_mali_1_sel,
2973 &gxbb_mali,
2974 &gxbb_cts_amclk_sel,
2975 &gxbb_cts_mclk_i958_sel,
2976 &gxbb_cts_i958,
2977 &gxbb_32k_clk_sel,
2978 &gxbb_sd_emmc_a_clk0_sel,
2979 &gxbb_sd_emmc_b_clk0_sel,
2980 &gxbb_sd_emmc_c_clk0_sel,
2981 &gxbb_vpu_0_sel,
2982 &gxbb_vpu_1_sel,
2983 &gxbb_vpu,
2984 &gxbb_vapb_0_sel,
2985 &gxbb_vapb_1_sel,
2986 &gxbb_vapb_sel,
2987 &gxbb_mpll0,
2988 &gxbb_mpll1,
2989 &gxbb_mpll2,
2990 &gxbb_mpll0_div,
2991 &gxbb_mpll1_div,
2992 &gxbb_mpll2_div,
2993 &gxbb_cts_amclk_div,
2994 &gxbb_fixed_pll,
2995 &gxbb_sys_pll,
2996 &gxbb_mpll_prediv,
2997 &gxbb_fclk_div2,
2998 &gxbb_fclk_div3,
2999 &gxbb_fclk_div4,
3000 &gxbb_fclk_div5,
3001 &gxbb_fclk_div7,
3002 &gxbb_vdec_1_sel,
3003 &gxbb_vdec_1_div,
3004 &gxbb_vdec_1,
3005 &gxbb_vdec_hevc_sel,
3006 &gxbb_vdec_hevc_div,
3007 &gxbb_vdec_hevc,
3008 &gxbb_gen_clk_sel,
3009 &gxbb_gen_clk_div,
3010 &gxbb_gen_clk,
3011 &gxbb_fixed_pll_dco,
3012 &gxbb_sys_pll_dco,
3013 &gxbb_gp0_pll,
3014 &gxbb_vid_pll,
3015 &gxbb_vid_pll_sel,
3016 &gxbb_vid_pll_div,
3017 &gxbb_vclk,
3018 &gxbb_vclk_sel,
3019 &gxbb_vclk_div,
3020 &gxbb_vclk_input,
3021 &gxbb_vclk_div1,
3022 &gxbb_vclk_div2_en,
3023 &gxbb_vclk_div4_en,
3024 &gxbb_vclk_div6_en,
3025 &gxbb_vclk_div12_en,
3026 &gxbb_vclk2,
3027 &gxbb_vclk2_sel,
3028 &gxbb_vclk2_div,
3029 &gxbb_vclk2_input,
3030 &gxbb_vclk2_div1,
3031 &gxbb_vclk2_div2_en,
3032 &gxbb_vclk2_div4_en,
3033 &gxbb_vclk2_div6_en,
3034 &gxbb_vclk2_div12_en,
3035 &gxbb_cts_enci,
3036 &gxbb_cts_enci_sel,
3037 &gxbb_cts_encp,
3038 &gxbb_cts_encp_sel,
3039 &gxbb_cts_vdac,
3040 &gxbb_cts_vdac_sel,
3041 &gxbb_hdmi_tx,
3042 &gxbb_hdmi_tx_sel,
3043 &gxbb_hdmi_sel,
3044 &gxbb_hdmi_div,
3045 &gxbb_hdmi,
2857 &gxbb_gp0_pll_dco, 3046 &gxbb_gp0_pll_dco,
2858 &gxbb_hdmi_pll, 3047 &gxbb_hdmi_pll,
2859 &gxbb_hdmi_pll_od, 3048 &gxbb_hdmi_pll_od,
@@ -2862,14 +3051,6 @@ static struct clk_regmap *const gxbb_clk_regmaps[] = {
2862}; 3051};
2863 3052
2864static struct clk_regmap *const gxl_clk_regmaps[] = { 3053static struct clk_regmap *const gxl_clk_regmaps[] = {
2865 &gxl_gp0_pll_dco,
2866 &gxl_hdmi_pll,
2867 &gxl_hdmi_pll_od,
2868 &gxl_hdmi_pll_od2,
2869 &gxl_hdmi_pll_dco,
2870};
2871
2872static struct clk_regmap *const gx_clk_regmaps[] = {
2873 &gxbb_clk81, 3054 &gxbb_clk81,
2874 &gxbb_ddr, 3055 &gxbb_ddr,
2875 &gxbb_dos, 3056 &gxbb_dos,
@@ -3056,23 +3237,22 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
3056 &gxbb_hdmi_sel, 3237 &gxbb_hdmi_sel,
3057 &gxbb_hdmi_div, 3238 &gxbb_hdmi_div,
3058 &gxbb_hdmi, 3239 &gxbb_hdmi,
3240 &gxl_gp0_pll_dco,
3241 &gxl_hdmi_pll,
3242 &gxl_hdmi_pll_od,
3243 &gxl_hdmi_pll_od2,
3244 &gxl_hdmi_pll_dco,
3059}; 3245};
3060 3246
3061struct clkc_data { 3247static const struct meson_eeclkc_data gxbb_clkc_data = {
3062 struct clk_regmap *const *regmap_clks;
3063 unsigned int regmap_clks_count;
3064 struct clk_hw_onecell_data *hw_onecell_data;
3065};
3066
3067static const struct clkc_data gxbb_clkc_data = {
3068 .regmap_clks = gxbb_clk_regmaps, 3248 .regmap_clks = gxbb_clk_regmaps,
3069 .regmap_clks_count = ARRAY_SIZE(gxbb_clk_regmaps), 3249 .regmap_clk_num = ARRAY_SIZE(gxbb_clk_regmaps),
3070 .hw_onecell_data = &gxbb_hw_onecell_data, 3250 .hw_onecell_data = &gxbb_hw_onecell_data,
3071}; 3251};
3072 3252
3073static const struct clkc_data gxl_clkc_data = { 3253static const struct meson_eeclkc_data gxl_clkc_data = {
3074 .regmap_clks = gxl_clk_regmaps, 3254 .regmap_clks = gxl_clk_regmaps,
3075 .regmap_clks_count = ARRAY_SIZE(gxl_clk_regmaps), 3255 .regmap_clk_num = ARRAY_SIZE(gxl_clk_regmaps),
3076 .hw_onecell_data = &gxl_hw_onecell_data, 3256 .hw_onecell_data = &gxl_hw_onecell_data,
3077}; 3257};
3078 3258
@@ -3082,52 +3262,8 @@ static const struct of_device_id clkc_match_table[] = {
3082 {}, 3262 {},
3083}; 3263};
3084 3264
3085static int gxbb_clkc_probe(struct platform_device *pdev)
3086{
3087 const struct clkc_data *clkc_data;
3088 struct regmap *map;
3089 int ret, i;
3090 struct device *dev = &pdev->dev;
3091
3092 clkc_data = of_device_get_match_data(dev);
3093 if (!clkc_data)
3094 return -EINVAL;
3095
3096 /* Get the hhi system controller node if available */
3097 map = syscon_node_to_regmap(of_get_parent(dev->of_node));
3098 if (IS_ERR(map)) {
3099 dev_err(dev, "failed to get HHI regmap\n");
3100 return PTR_ERR(map);
3101 }
3102
3103 /* Populate regmap for the common regmap backed clocks */
3104 for (i = 0; i < ARRAY_SIZE(gx_clk_regmaps); i++)
3105 gx_clk_regmaps[i]->map = map;
3106
3107 /* Populate regmap for soc specific clocks */
3108 for (i = 0; i < clkc_data->regmap_clks_count; i++)
3109 clkc_data->regmap_clks[i]->map = map;
3110
3111 /* Register all clks */
3112 for (i = 0; i < clkc_data->hw_onecell_data->num; i++) {
3113 /* array might be sparse */
3114 if (!clkc_data->hw_onecell_data->hws[i])
3115 continue;
3116
3117 ret = devm_clk_hw_register(dev,
3118 clkc_data->hw_onecell_data->hws[i]);
3119 if (ret) {
3120 dev_err(dev, "Clock registration failed\n");
3121 return ret;
3122 }
3123 }
3124
3125 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
3126 clkc_data->hw_onecell_data);
3127}
3128
3129static struct platform_driver gxbb_driver = { 3265static struct platform_driver gxbb_driver = {
3130 .probe = gxbb_clkc_probe, 3266 .probe = meson_eeclkc_probe,
3131 .driver = { 3267 .driver = {
3132 .name = "gxbb-clkc", 3268 .name = "gxbb-clkc",
3133 .of_match_table = clkc_match_table, 3269 .of_match_table = clkc_match_table,
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index f965845917e3..b67951909e04 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -14,9 +14,11 @@
14#include <linux/reset-controller.h> 14#include <linux/reset-controller.h>
15#include <linux/mfd/syscon.h> 15#include <linux/mfd/syscon.h>
16#include <linux/of_device.h> 16#include <linux/of_device.h>
17#include "clk-regmap.h" 17#include <linux/slab.h>
18#include "meson-aoclk.h" 18#include "meson-aoclk.h"
19 19
20#include "clk-input.h"
21
20static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev, 22static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev,
21 unsigned long id) 23 unsigned long id)
22{ 24{
@@ -31,6 +33,37 @@ static const struct reset_control_ops meson_aoclk_reset_ops = {
31 .reset = meson_aoclk_do_reset, 33 .reset = meson_aoclk_do_reset,
32}; 34};
33 35
36static int meson_aoclkc_register_inputs(struct device *dev,
37 struct meson_aoclk_data *data)
38{
39 struct clk_hw *hw;
40 char *str;
41 int i;
42
43 for (i = 0; i < data->num_inputs; i++) {
44 const struct meson_aoclk_input *in = &data->inputs[i];
45
46 str = kasprintf(GFP_KERNEL, "%s%s", data->input_prefix,
47 in->name);
48 if (!str)
49 return -ENOMEM;
50
51 hw = meson_clk_hw_register_input(dev, in->name, str, 0);
52 kfree(str);
53
54 if (IS_ERR(hw)) {
55 if (!in->required && PTR_ERR(hw) == -ENOENT)
56 continue;
57 else if (PTR_ERR(hw) != -EPROBE_DEFER)
58 dev_err(dev, "failed to register input %s\n",
59 in->name);
60 return PTR_ERR(hw);
61 }
62 }
63
64 return 0;
65}
66
34int meson_aoclkc_probe(struct platform_device *pdev) 67int meson_aoclkc_probe(struct platform_device *pdev)
35{ 68{
36 struct meson_aoclk_reset_controller *rstc; 69 struct meson_aoclk_reset_controller *rstc;
@@ -53,6 +86,10 @@ int meson_aoclkc_probe(struct platform_device *pdev)
53 return PTR_ERR(regmap); 86 return PTR_ERR(regmap);
54 } 87 }
55 88
89 ret = meson_aoclkc_register_inputs(dev, data);
90 if (ret)
91 return ret;
92
56 /* Reset Controller */ 93 /* Reset Controller */
57 rstc->data = data; 94 rstc->data = data;
58 rstc->regmap = regmap; 95 rstc->regmap = regmap;
@@ -65,15 +102,20 @@ int meson_aoclkc_probe(struct platform_device *pdev)
65 return ret; 102 return ret;
66 } 103 }
67 104
68 /* 105 /* Populate regmap */
69 * Populate regmap and register all clks 106 for (clkid = 0; clkid < data->num_clks; clkid++)
70 */
71 for (clkid = 0; clkid < data->num_clks; clkid++) {
72 data->clks[clkid]->map = regmap; 107 data->clks[clkid]->map = regmap;
73 108
109 /* Register all clks */
110 for (clkid = 0; clkid < data->hw_data->num; clkid++) {
111 if (!data->hw_data->hws[clkid])
112 continue;
113
74 ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]); 114 ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
75 if (ret) 115 if (ret) {
116 dev_err(dev, "Clock registration failed\n");
76 return ret; 117 return ret;
118 }
77 } 119 }
78 120
79 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, 121 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index ab2819e88922..999cde3868f7 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -11,16 +11,27 @@
11#ifndef __MESON_AOCLK_H__ 11#ifndef __MESON_AOCLK_H__
12#define __MESON_AOCLK_H__ 12#define __MESON_AOCLK_H__
13 13
14#include <linux/clk-provider.h>
14#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/regmap.h>
15#include <linux/reset-controller.h> 17#include <linux/reset-controller.h>
18
16#include "clk-regmap.h" 19#include "clk-regmap.h"
17 20
21struct meson_aoclk_input {
22 const char *name;
23 bool required;
24};
25
18struct meson_aoclk_data { 26struct meson_aoclk_data {
19 const unsigned int reset_reg; 27 const unsigned int reset_reg;
20 const int num_reset; 28 const int num_reset;
21 const unsigned int *reset; 29 const unsigned int *reset;
22 int num_clks; 30 const int num_clks;
23 struct clk_regmap **clks; 31 struct clk_regmap **clks;
32 const int num_inputs;
33 const struct meson_aoclk_input *inputs;
34 const char *input_prefix;
24 const struct clk_hw_onecell_data *hw_data; 35 const struct clk_hw_onecell_data *hw_data;
25}; 36};
26 37
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
new file mode 100644
index 000000000000..37a34c9c3885
--- /dev/null
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -0,0 +1,63 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#include <linux/clk-provider.h>
8#include <linux/of_device.h>
9#include <linux/platform_device.h>
10#include <linux/mfd/syscon.h>
11#include <linux/regmap.h>
12
13#include "clk-input.h"
14#include "clk-regmap.h"
15#include "meson-eeclk.h"
16
17int meson_eeclkc_probe(struct platform_device *pdev)
18{
19 const struct meson_eeclkc_data *data;
20 struct device *dev = &pdev->dev;
21 struct clk_hw *input;
22 struct regmap *map;
23 int ret, i;
24
25 data = of_device_get_match_data(dev);
26 if (!data)
27 return -EINVAL;
28
29 /* Get the hhi system controller node */
30 map = syscon_node_to_regmap(of_get_parent(dev->of_node));
31 if (IS_ERR(map)) {
32 dev_err(dev,
33 "failed to get HHI regmap\n");
34 return PTR_ERR(map);
35 }
36
37 input = meson_clk_hw_register_input(dev, "xtal", IN_PREFIX "xtal", 0);
38 if (IS_ERR(input)) {
39 ret = PTR_ERR(input);
40 if (ret != -EPROBE_DEFER)
41 dev_err(dev, "failed to get input clock");
42 return ret;
43 }
44
45 /* Populate regmap for the regmap backed clocks */
46 for (i = 0; i < data->regmap_clk_num; i++)
47 data->regmap_clks[i]->map = map;
48
49 for (i = 0; i < data->hw_onecell_data->num; i++) {
50 /* array might be sparse */
51 if (!data->hw_onecell_data->hws[i])
52 continue;
53
54 ret = devm_clk_hw_register(dev, data->hw_onecell_data->hws[i]);
55 if (ret) {
56 dev_err(dev, "Clock registration failed\n");
57 return ret;
58 }
59 }
60
61 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
62 data->hw_onecell_data);
63}
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
new file mode 100644
index 000000000000..1b809b1419fe
--- /dev/null
+++ b/drivers/clk/meson/meson-eeclk.h
@@ -0,0 +1,25 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLKC_H
8#define __MESON_CLKC_H
9
10#include <linux/clk-provider.h>
11#include "clk-regmap.h"
12
13#define IN_PREFIX "ee-in-"
14
15struct platform_device;
16
17struct meson_eeclkc_data {
18 struct clk_regmap *const *regmap_clks;
19 unsigned int regmap_clk_num;
20 struct clk_hw_onecell_data *hw_onecell_data;
21};
22
23int meson_eeclkc_probe(struct platform_device *pdev);
24
25#endif /* __MESON_CLKC_H */
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 950d0e548c75..576ad42252d0 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -16,9 +16,10 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/regmap.h> 17#include <linux/regmap.h>
18 18
19#include "clkc.h"
20#include "meson8b.h" 19#include "meson8b.h"
21#include "clk-regmap.h" 20#include "clk-regmap.h"
21#include "clk-pll.h"
22#include "clk-mpll.h"
22 23
23static DEFINE_SPINLOCK(meson_clk_lock); 24static DEFINE_SPINLOCK(meson_clk_lock);
24 25
@@ -803,16 +804,16 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
803 }, 804 },
804}; 805};
805 806
806static u32 mux_table_abp[] = { 1, 2, 3, 4, 5, 6, 7 }; 807static u32 mux_table_apb[] = { 1, 2, 3, 4, 5, 6, 7 };
807static struct clk_regmap meson8b_abp_clk_sel = { 808static struct clk_regmap meson8b_apb_clk_sel = {
808 .data = &(struct clk_regmap_mux_data){ 809 .data = &(struct clk_regmap_mux_data){
809 .offset = HHI_SYS_CPU_CLK_CNTL1, 810 .offset = HHI_SYS_CPU_CLK_CNTL1,
810 .mask = 0x7, 811 .mask = 0x7,
811 .shift = 3, 812 .shift = 3,
812 .table = mux_table_abp, 813 .table = mux_table_apb,
813 }, 814 },
814 .hw.init = &(struct clk_init_data){ 815 .hw.init = &(struct clk_init_data){
815 .name = "abp_clk_sel", 816 .name = "apb_clk_sel",
816 .ops = &clk_regmap_mux_ops, 817 .ops = &clk_regmap_mux_ops,
817 .parent_names = (const char *[]){ "cpu_clk_div2", 818 .parent_names = (const char *[]){ "cpu_clk_div2",
818 "cpu_clk_div3", 819 "cpu_clk_div3",
@@ -825,16 +826,16 @@ static struct clk_regmap meson8b_abp_clk_sel = {
825 }, 826 },
826}; 827};
827 828
828static struct clk_regmap meson8b_abp_clk_gate = { 829static struct clk_regmap meson8b_apb_clk_gate = {
829 .data = &(struct clk_regmap_gate_data){ 830 .data = &(struct clk_regmap_gate_data){
830 .offset = HHI_SYS_CPU_CLK_CNTL1, 831 .offset = HHI_SYS_CPU_CLK_CNTL1,
831 .bit_idx = 16, 832 .bit_idx = 16,
832 .flags = CLK_GATE_SET_TO_DISABLE, 833 .flags = CLK_GATE_SET_TO_DISABLE,
833 }, 834 },
834 .hw.init = &(struct clk_init_data){ 835 .hw.init = &(struct clk_init_data){
835 .name = "abp_clk_dis", 836 .name = "apb_clk_dis",
836 .ops = &clk_regmap_gate_ro_ops, 837 .ops = &clk_regmap_gate_ro_ops,
837 .parent_names = (const char *[]){ "abp_clk_sel" }, 838 .parent_names = (const char *[]){ "apb_clk_sel" },
838 .num_parents = 1, 839 .num_parents = 1,
839 .flags = CLK_SET_RATE_PARENT, 840 .flags = CLK_SET_RATE_PARENT,
840 }, 841 },
@@ -1573,6 +1574,135 @@ static struct clk_regmap meson8b_hdmi_sys = {
1573 }, 1574 },
1574}; 1575};
1575 1576
1577/*
1578 * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
1579 * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
1580 * has mali_0 and no glitch-free mux.
1581 */
1582static const char * const meson8b_mali_0_1_parent_names[] = {
1583 "xtal", "mpll2", "mpll1", "fclk_div7", "fclk_div4", "fclk_div3",
1584 "fclk_div5"
1585};
1586
1587static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
1588
1589static struct clk_regmap meson8b_mali_0_sel = {
1590 .data = &(struct clk_regmap_mux_data){
1591 .offset = HHI_MALI_CLK_CNTL,
1592 .mask = 0x7,
1593 .shift = 9,
1594 .table = meson8b_mali_0_1_mux_table,
1595 },
1596 .hw.init = &(struct clk_init_data){
1597 .name = "mali_0_sel",
1598 .ops = &clk_regmap_mux_ops,
1599 .parent_names = meson8b_mali_0_1_parent_names,
1600 .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
1601 /*
1602 * Don't propagate rate changes up because the only changeable
1603 * parents are mpll1 and mpll2 but we need those for audio and
1604 * RGMII (Ethernet). We don't want to change the audio or
1605 * Ethernet clocks when setting the GPU frequency.
1606 */
1607 .flags = 0,
1608 },
1609};
1610
1611static struct clk_regmap meson8b_mali_0_div = {
1612 .data = &(struct clk_regmap_div_data){
1613 .offset = HHI_MALI_CLK_CNTL,
1614 .shift = 0,
1615 .width = 7,
1616 },
1617 .hw.init = &(struct clk_init_data){
1618 .name = "mali_0_div",
1619 .ops = &clk_regmap_divider_ops,
1620 .parent_names = (const char *[]){ "mali_0_sel" },
1621 .num_parents = 1,
1622 .flags = CLK_SET_RATE_PARENT,
1623 },
1624};
1625
1626static struct clk_regmap meson8b_mali_0 = {
1627 .data = &(struct clk_regmap_gate_data){
1628 .offset = HHI_MALI_CLK_CNTL,
1629 .bit_idx = 8,
1630 },
1631 .hw.init = &(struct clk_init_data){
1632 .name = "mali_0",
1633 .ops = &clk_regmap_gate_ops,
1634 .parent_names = (const char *[]){ "mali_0_div" },
1635 .num_parents = 1,
1636 .flags = CLK_SET_RATE_PARENT,
1637 },
1638};
1639
1640static struct clk_regmap meson8b_mali_1_sel = {
1641 .data = &(struct clk_regmap_mux_data){
1642 .offset = HHI_MALI_CLK_CNTL,
1643 .mask = 0x7,
1644 .shift = 25,
1645 .table = meson8b_mali_0_1_mux_table,
1646 },
1647 .hw.init = &(struct clk_init_data){
1648 .name = "mali_1_sel",
1649 .ops = &clk_regmap_mux_ops,
1650 .parent_names = meson8b_mali_0_1_parent_names,
1651 .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
1652 /*
1653 * Don't propagate rate changes up because the only changeable
1654 * parents are mpll1 and mpll2 but we need those for audio and
1655 * RGMII (Ethernet). We don't want to change the audio or
1656 * Ethernet clocks when setting the GPU frequency.
1657 */
1658 .flags = 0,
1659 },
1660};
1661
1662static struct clk_regmap meson8b_mali_1_div = {
1663 .data = &(struct clk_regmap_div_data){
1664 .offset = HHI_MALI_CLK_CNTL,
1665 .shift = 16,
1666 .width = 7,
1667 },
1668 .hw.init = &(struct clk_init_data){
1669 .name = "mali_1_div",
1670 .ops = &clk_regmap_divider_ops,
1671 .parent_names = (const char *[]){ "mali_1_sel" },
1672 .num_parents = 1,
1673 .flags = CLK_SET_RATE_PARENT,
1674 },
1675};
1676
1677static struct clk_regmap meson8b_mali_1 = {
1678 .data = &(struct clk_regmap_gate_data){
1679 .offset = HHI_MALI_CLK_CNTL,
1680 .bit_idx = 24,
1681 },
1682 .hw.init = &(struct clk_init_data){
1683 .name = "mali_1",
1684 .ops = &clk_regmap_gate_ops,
1685 .parent_names = (const char *[]){ "mali_1_div" },
1686 .num_parents = 1,
1687 .flags = CLK_SET_RATE_PARENT,
1688 },
1689};
1690
1691static struct clk_regmap meson8b_mali = {
1692 .data = &(struct clk_regmap_mux_data){
1693 .offset = HHI_MALI_CLK_CNTL,
1694 .mask = 1,
1695 .shift = 31,
1696 },
1697 .hw.init = &(struct clk_init_data){
1698 .name = "mali",
1699 .ops = &clk_regmap_mux_ops,
1700 .parent_names = (const char *[]){ "mali_0", "mali_1" },
1701 .num_parents = 2,
1702 .flags = CLK_SET_RATE_PARENT,
1703 },
1704};
1705
1576/* Everything Else (EE) domain gates */ 1706/* Everything Else (EE) domain gates */
1577 1707
1578static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0); 1708static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
@@ -1659,6 +1789,188 @@ static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1);
1659static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2); 1789static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2);
1660static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3); 1790static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
1661 1791
1792static struct clk_hw_onecell_data meson8_hw_onecell_data = {
1793 .hws = {
1794 [CLKID_XTAL] = &meson8b_xtal.hw,
1795 [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
1796 [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
1797 [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
1798 [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
1799 [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
1800 [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
1801 [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
1802 [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
1803 [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
1804 [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
1805 [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
1806 [CLKID_CLK81] = &meson8b_clk81.hw,
1807 [CLKID_DDR] = &meson8b_ddr.hw,
1808 [CLKID_DOS] = &meson8b_dos.hw,
1809 [CLKID_ISA] = &meson8b_isa.hw,
1810 [CLKID_PL301] = &meson8b_pl301.hw,
1811 [CLKID_PERIPHS] = &meson8b_periphs.hw,
1812 [CLKID_SPICC] = &meson8b_spicc.hw,
1813 [CLKID_I2C] = &meson8b_i2c.hw,
1814 [CLKID_SAR_ADC] = &meson8b_sar_adc.hw,
1815 [CLKID_SMART_CARD] = &meson8b_smart_card.hw,
1816 [CLKID_RNG0] = &meson8b_rng0.hw,
1817 [CLKID_UART0] = &meson8b_uart0.hw,
1818 [CLKID_SDHC] = &meson8b_sdhc.hw,
1819 [CLKID_STREAM] = &meson8b_stream.hw,
1820 [CLKID_ASYNC_FIFO] = &meson8b_async_fifo.hw,
1821 [CLKID_SDIO] = &meson8b_sdio.hw,
1822 [CLKID_ABUF] = &meson8b_abuf.hw,
1823 [CLKID_HIU_IFACE] = &meson8b_hiu_iface.hw,
1824 [CLKID_ASSIST_MISC] = &meson8b_assist_misc.hw,
1825 [CLKID_SPI] = &meson8b_spi.hw,
1826 [CLKID_I2S_SPDIF] = &meson8b_i2s_spdif.hw,
1827 [CLKID_ETH] = &meson8b_eth.hw,
1828 [CLKID_DEMUX] = &meson8b_demux.hw,
1829 [CLKID_AIU_GLUE] = &meson8b_aiu_glue.hw,
1830 [CLKID_IEC958] = &meson8b_iec958.hw,
1831 [CLKID_I2S_OUT] = &meson8b_i2s_out.hw,
1832 [CLKID_AMCLK] = &meson8b_amclk.hw,
1833 [CLKID_AIFIFO2] = &meson8b_aififo2.hw,
1834 [CLKID_MIXER] = &meson8b_mixer.hw,
1835 [CLKID_MIXER_IFACE] = &meson8b_mixer_iface.hw,
1836 [CLKID_ADC] = &meson8b_adc.hw,
1837 [CLKID_BLKMV] = &meson8b_blkmv.hw,
1838 [CLKID_AIU] = &meson8b_aiu.hw,
1839 [CLKID_UART1] = &meson8b_uart1.hw,
1840 [CLKID_G2D] = &meson8b_g2d.hw,
1841 [CLKID_USB0] = &meson8b_usb0.hw,
1842 [CLKID_USB1] = &meson8b_usb1.hw,
1843 [CLKID_RESET] = &meson8b_reset.hw,
1844 [CLKID_NAND] = &meson8b_nand.hw,
1845 [CLKID_DOS_PARSER] = &meson8b_dos_parser.hw,
1846 [CLKID_USB] = &meson8b_usb.hw,
1847 [CLKID_VDIN1] = &meson8b_vdin1.hw,
1848 [CLKID_AHB_ARB0] = &meson8b_ahb_arb0.hw,
1849 [CLKID_EFUSE] = &meson8b_efuse.hw,
1850 [CLKID_BOOT_ROM] = &meson8b_boot_rom.hw,
1851 [CLKID_AHB_DATA_BUS] = &meson8b_ahb_data_bus.hw,
1852 [CLKID_AHB_CTRL_BUS] = &meson8b_ahb_ctrl_bus.hw,
1853 [CLKID_HDMI_INTR_SYNC] = &meson8b_hdmi_intr_sync.hw,
1854 [CLKID_HDMI_PCLK] = &meson8b_hdmi_pclk.hw,
1855 [CLKID_USB1_DDR_BRIDGE] = &meson8b_usb1_ddr_bridge.hw,
1856 [CLKID_USB0_DDR_BRIDGE] = &meson8b_usb0_ddr_bridge.hw,
1857 [CLKID_MMC_PCLK] = &meson8b_mmc_pclk.hw,
1858 [CLKID_DVIN] = &meson8b_dvin.hw,
1859 [CLKID_UART2] = &meson8b_uart2.hw,
1860 [CLKID_SANA] = &meson8b_sana.hw,
1861 [CLKID_VPU_INTR] = &meson8b_vpu_intr.hw,
1862 [CLKID_SEC_AHB_AHB3_BRIDGE] = &meson8b_sec_ahb_ahb3_bridge.hw,
1863 [CLKID_CLK81_A9] = &meson8b_clk81_a9.hw,
1864 [CLKID_VCLK2_VENCI0] = &meson8b_vclk2_venci0.hw,
1865 [CLKID_VCLK2_VENCI1] = &meson8b_vclk2_venci1.hw,
1866 [CLKID_VCLK2_VENCP0] = &meson8b_vclk2_vencp0.hw,
1867 [CLKID_VCLK2_VENCP1] = &meson8b_vclk2_vencp1.hw,
1868 [CLKID_GCLK_VENCI_INT] = &meson8b_gclk_venci_int.hw,
1869 [CLKID_GCLK_VENCP_INT] = &meson8b_gclk_vencp_int.hw,
1870 [CLKID_DAC_CLK] = &meson8b_dac_clk.hw,
1871 [CLKID_AOCLK_GATE] = &meson8b_aoclk_gate.hw,
1872 [CLKID_IEC958_GATE] = &meson8b_iec958_gate.hw,
1873 [CLKID_ENC480P] = &meson8b_enc480p.hw,
1874 [CLKID_RNG1] = &meson8b_rng1.hw,
1875 [CLKID_GCLK_VENCL_INT] = &meson8b_gclk_vencl_int.hw,
1876 [CLKID_VCLK2_VENCLMCC] = &meson8b_vclk2_venclmcc.hw,
1877 [CLKID_VCLK2_VENCL] = &meson8b_vclk2_vencl.hw,
1878 [CLKID_VCLK2_OTHER] = &meson8b_vclk2_other.hw,
1879 [CLKID_EDP] = &meson8b_edp.hw,
1880 [CLKID_AO_MEDIA_CPU] = &meson8b_ao_media_cpu.hw,
1881 [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw,
1882 [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw,
1883 [CLKID_AO_IFACE] = &meson8b_ao_iface.hw,
1884 [CLKID_MPLL0] = &meson8b_mpll0.hw,
1885 [CLKID_MPLL1] = &meson8b_mpll1.hw,
1886 [CLKID_MPLL2] = &meson8b_mpll2.hw,
1887 [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw,
1888 [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw,
1889 [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw,
1890 [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw,
1891 [CLKID_CPU_IN_DIV2] = &meson8b_cpu_in_div2.hw,
1892 [CLKID_CPU_IN_DIV3] = &meson8b_cpu_in_div3.hw,
1893 [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw,
1894 [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw,
1895 [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw,
1896 [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw,
1897 [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw,
1898 [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw,
1899 [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw,
1900 [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
1901 [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
1902 [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
1903 [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
1904 [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
1905 [CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
1906 [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
1907 [CLKID_CPU_CLK_DIV2] = &meson8b_cpu_clk_div2.hw,
1908 [CLKID_CPU_CLK_DIV3] = &meson8b_cpu_clk_div3.hw,
1909 [CLKID_CPU_CLK_DIV4] = &meson8b_cpu_clk_div4.hw,
1910 [CLKID_CPU_CLK_DIV5] = &meson8b_cpu_clk_div5.hw,
1911 [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
1912 [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
1913 [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
1914 [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
1915 [CLKID_APB] = &meson8b_apb_clk_gate.hw,
1916 [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
1917 [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
1918 [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
1919 [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
1920 [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
1921 [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
1922 [CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
1923 [CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
1924 [CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
1925 [CLKID_VID_PLL_IN_EN] = &meson8b_vid_pll_in_en.hw,
1926 [CLKID_VID_PLL_PRE_DIV] = &meson8b_vid_pll_pre_div.hw,
1927 [CLKID_VID_PLL_POST_DIV] = &meson8b_vid_pll_post_div.hw,
1928 [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw,
1929 [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
1930 [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
1931 [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
1932 [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
1933 [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
1934 [CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
1935 [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
1936 [CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
1937 [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
1938 [CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
1939 [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
1940 [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
1941 [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
1942 [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
1943 [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
1944 [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
1945 [CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
1946 [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
1947 [CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
1948 [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
1949 [CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
1950 [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
1951 [CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
1952 [CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
1953 [CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
1954 [CLKID_CTS_ENCP] = &meson8b_cts_encp.hw,
1955 [CLKID_CTS_ENCI_SEL] = &meson8b_cts_enci_sel.hw,
1956 [CLKID_CTS_ENCI] = &meson8b_cts_enci.hw,
1957 [CLKID_HDMI_TX_PIXEL_SEL] = &meson8b_hdmi_tx_pixel_sel.hw,
1958 [CLKID_HDMI_TX_PIXEL] = &meson8b_hdmi_tx_pixel.hw,
1959 [CLKID_CTS_ENCL_SEL] = &meson8b_cts_encl_sel.hw,
1960 [CLKID_CTS_ENCL] = &meson8b_cts_encl.hw,
1961 [CLKID_CTS_VDAC0_SEL] = &meson8b_cts_vdac0_sel.hw,
1962 [CLKID_CTS_VDAC0] = &meson8b_cts_vdac0.hw,
1963 [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
1964 [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
1965 [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
1966 [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
1967 [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
1968 [CLKID_MALI] = &meson8b_mali_0.hw,
1969 [CLK_NR_CLKS] = NULL,
1970 },
1971 .num = CLK_NR_CLKS,
1972};
1973
1662static struct clk_hw_onecell_data meson8b_hw_onecell_data = { 1974static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
1663 .hws = { 1975 .hws = {
1664 [CLKID_XTAL] = &meson8b_xtal.hw, 1976 [CLKID_XTAL] = &meson8b_xtal.hw,
@@ -1781,8 +2093,8 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
1781 [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw, 2093 [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
1782 [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw, 2094 [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
1783 [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw, 2095 [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
1784 [CLKID_ABP_SEL] = &meson8b_abp_clk_sel.hw, 2096 [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
1785 [CLKID_ABP] = &meson8b_abp_clk_gate.hw, 2097 [CLKID_APB] = &meson8b_apb_clk_gate.hw,
1786 [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw, 2098 [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
1787 [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw, 2099 [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
1788 [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw, 2100 [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
@@ -1833,6 +2145,13 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
1833 [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw, 2145 [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
1834 [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw, 2146 [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
1835 [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw, 2147 [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
2148 [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
2149 [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
2150 [CLKID_MALI_0] = &meson8b_mali_0.hw,
2151 [CLKID_MALI_1_SEL] = &meson8b_mali_1_sel.hw,
2152 [CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw,
2153 [CLKID_MALI_1] = &meson8b_mali_1.hw,
2154 [CLKID_MALI] = &meson8b_mali.hw,
1836 [CLK_NR_CLKS] = NULL, 2155 [CLK_NR_CLKS] = NULL,
1837 }, 2156 },
1838 .num = CLK_NR_CLKS, 2157 .num = CLK_NR_CLKS,
@@ -1943,8 +2262,8 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
1943 &meson8b_fixed_pll_dco, 2262 &meson8b_fixed_pll_dco,
1944 &meson8b_hdmi_pll_dco, 2263 &meson8b_hdmi_pll_dco,
1945 &meson8b_sys_pll_dco, 2264 &meson8b_sys_pll_dco,
1946 &meson8b_abp_clk_sel, 2265 &meson8b_apb_clk_sel,
1947 &meson8b_abp_clk_gate, 2266 &meson8b_apb_clk_gate,
1948 &meson8b_periph_clk_sel, 2267 &meson8b_periph_clk_sel,
1949 &meson8b_periph_clk_gate, 2268 &meson8b_periph_clk_gate,
1950 &meson8b_axi_clk_sel, 2269 &meson8b_axi_clk_sel,
@@ -1988,6 +2307,13 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
1988 &meson8b_hdmi_sys_sel, 2307 &meson8b_hdmi_sys_sel,
1989 &meson8b_hdmi_sys_div, 2308 &meson8b_hdmi_sys_div,
1990 &meson8b_hdmi_sys, 2309 &meson8b_hdmi_sys,
2310 &meson8b_mali_0_sel,
2311 &meson8b_mali_0_div,
2312 &meson8b_mali_0,
2313 &meson8b_mali_1_sel,
2314 &meson8b_mali_1_div,
2315 &meson8b_mali_1,
2316 &meson8b_mali,
1991}; 2317};
1992 2318
1993static const struct meson8b_clk_reset_line { 2319static const struct meson8b_clk_reset_line {
@@ -2132,7 +2458,6 @@ static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
2132 2458
2133static struct meson8b_nb_data meson8b_cpu_nb_data = { 2459static struct meson8b_nb_data meson8b_cpu_nb_data = {
2134 .nb.notifier_call = meson8b_cpu_clk_notifier_cb, 2460 .nb.notifier_call = meson8b_cpu_clk_notifier_cb,
2135 .onecell_data = &meson8b_hw_onecell_data,
2136}; 2461};
2137 2462
2138static const struct regmap_config clkc_regmap_config = { 2463static const struct regmap_config clkc_regmap_config = {
@@ -2141,7 +2466,8 @@ static const struct regmap_config clkc_regmap_config = {
2141 .reg_stride = 4, 2466 .reg_stride = 4,
2142}; 2467};
2143 2468
2144static void __init meson8b_clkc_init(struct device_node *np) 2469static void __init meson8b_clkc_init_common(struct device_node *np,
2470 struct clk_hw_onecell_data *clk_hw_onecell_data)
2145{ 2471{
2146 struct meson8b_clk_reset *rstc; 2472 struct meson8b_clk_reset *rstc;
2147 const char *notifier_clk_name; 2473 const char *notifier_clk_name;
@@ -2192,14 +2518,16 @@ static void __init meson8b_clkc_init(struct device_node *np)
2192 */ 2518 */
2193 for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) { 2519 for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
2194 /* array might be sparse */ 2520 /* array might be sparse */
2195 if (!meson8b_hw_onecell_data.hws[i]) 2521 if (!clk_hw_onecell_data->hws[i])
2196 continue; 2522 continue;
2197 2523
2198 ret = clk_hw_register(NULL, meson8b_hw_onecell_data.hws[i]); 2524 ret = clk_hw_register(NULL, clk_hw_onecell_data->hws[i]);
2199 if (ret) 2525 if (ret)
2200 return; 2526 return;
2201 } 2527 }
2202 2528
2529 meson8b_cpu_nb_data.onecell_data = clk_hw_onecell_data;
2530
2203 /* 2531 /*
2204 * FIXME we shouldn't program the muxes in notifier handlers. The 2532 * FIXME we shouldn't program the muxes in notifier handlers. The
2205 * tricky programming sequence will be handled by the forthcoming 2533 * tricky programming sequence will be handled by the forthcoming
@@ -2215,13 +2543,23 @@ static void __init meson8b_clkc_init(struct device_node *np)
2215 } 2543 }
2216 2544
2217 ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, 2545 ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
2218 &meson8b_hw_onecell_data); 2546 clk_hw_onecell_data);
2219 if (ret) 2547 if (ret)
2220 pr_err("%s: failed to register clock provider\n", __func__); 2548 pr_err("%s: failed to register clock provider\n", __func__);
2221} 2549}
2222 2550
2551static void __init meson8_clkc_init(struct device_node *np)
2552{
2553 return meson8b_clkc_init_common(np, &meson8_hw_onecell_data);
2554}
2555
2556static void __init meson8b_clkc_init(struct device_node *np)
2557{
2558 return meson8b_clkc_init_common(np, &meson8b_hw_onecell_data);
2559}
2560
2223CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc", 2561CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc",
2224 meson8b_clkc_init); 2562 meson8_clkc_init);
2225CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc", 2563CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc",
2226 meson8b_clkc_init); 2564 meson8b_clkc_init);
2227CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc", 2565CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc",
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 87fba739af81..b8c58faeae52 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -33,6 +33,7 @@
33#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */ 33#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
34#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */ 34#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
35#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */ 35#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
36#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
36#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */ 37#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
37#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */ 38#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
38#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */ 39#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
@@ -91,7 +92,7 @@
91#define CLKID_CPU_CLK_DIV6 120 92#define CLKID_CPU_CLK_DIV6 120
92#define CLKID_CPU_CLK_DIV7 121 93#define CLKID_CPU_CLK_DIV7 121
93#define CLKID_CPU_CLK_DIV8 122 94#define CLKID_CPU_CLK_DIV8 122
94#define CLKID_ABP_SEL 123 95#define CLKID_APB_SEL 123
95#define CLKID_PERIPH_SEL 125 96#define CLKID_PERIPH_SEL 125
96#define CLKID_AXI_SEL 127 97#define CLKID_AXI_SEL 127
97#define CLKID_L2_DRAM_SEL 129 98#define CLKID_L2_DRAM_SEL 129
@@ -139,8 +140,14 @@
139#define CLKID_HDMI_SYS_SEL 172 140#define CLKID_HDMI_SYS_SEL 172
140#define CLKID_HDMI_SYS_DIV 173 141#define CLKID_HDMI_SYS_DIV 173
141#define CLKID_HDMI_SYS 174 142#define CLKID_HDMI_SYS 174
143#define CLKID_MALI_0_SEL 175
144#define CLKID_MALI_0_DIV 176
145#define CLKID_MALI_0 177
146#define CLKID_MALI_1_SEL 178
147#define CLKID_MALI_1_DIV 179
148#define CLKID_MALI_1 180
142 149
143#define CLK_NR_CLKS 175 150#define CLK_NR_CLKS 181
144 151
145/* 152/*
146 * include the CLKID and RESETID that have 153 * include the CLKID and RESETID that have
diff --git a/drivers/clk/meson/parm.h b/drivers/clk/meson/parm.h
new file mode 100644
index 000000000000..3c9ef1b505ce
--- /dev/null
+++ b/drivers/clk/meson/parm.h
@@ -0,0 +1,46 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2015 Endless Mobile, Inc.
4 * Author: Carlo Caione <carlo@endlessm.com>
5 */
6
7#ifndef __MESON_PARM_H
8#define __MESON_PARM_H
9
10#include <linux/bits.h>
11#include <linux/regmap.h>
12
13#define PMASK(width) GENMASK(width - 1, 0)
14#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
15#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
16
17#define PARM_GET(width, shift, reg) \
18 (((reg) & SETPMASK(width, shift)) >> (shift))
19#define PARM_SET(width, shift, reg, val) \
20 (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))
21
22#define MESON_PARM_APPLICABLE(p) (!!((p)->width))
23
24struct parm {
25 u16 reg_off;
26 u8 shift;
27 u8 width;
28};
29
30static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
31{
32 unsigned int val;
33
34 regmap_read(map, p->reg_off, &val);
35 return PARM_GET(p->width, p->shift, val);
36}
37
38static inline void meson_parm_write(struct regmap *map, struct parm *p,
39 unsigned int val)
40{
41 regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
42 val << p->shift);
43}
44
45#endif /* __MESON_PARM_H */
46
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index bc64019b8eeb..3acf03780221 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -16,7 +16,11 @@
16 * duty_cycle = (1 + hi) / (1 + val) 16 * duty_cycle = (1 + hi) / (1 + val)
17 */ 17 */
18 18
19#include "clkc-audio.h" 19#include <linux/clk-provider.h>
20#include <linux/module.h>
21
22#include "clk-regmap.h"
23#include "sclk-div.h"
20 24
21static inline struct meson_sclk_div_data * 25static inline struct meson_sclk_div_data *
22meson_sclk_div_data(struct clk_regmap *clk) 26meson_sclk_div_data(struct clk_regmap *clk)
@@ -241,3 +245,7 @@ const struct clk_ops meson_sclk_div_ops = {
241 .init = sclk_div_init, 245 .init = sclk_div_init,
242}; 246};
243EXPORT_SYMBOL_GPL(meson_sclk_div_ops); 247EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
248
249MODULE_DESCRIPTION("Amlogic Sample divider driver");
250MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
251MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clkc-audio.h b/drivers/clk/meson/sclk-div.h
index 0a7c157ebf81..b64b2a32005f 100644
--- a/drivers/clk/meson/clkc-audio.h
+++ b/drivers/clk/meson/sclk-div.h
@@ -4,16 +4,11 @@
4 * Author: Jerome Brunet <jbrunet@baylibre.com> 4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */ 5 */
6 6
7#ifndef __MESON_CLKC_AUDIO_H 7#ifndef __MESON_SCLK_DIV_H
8#define __MESON_CLKC_AUDIO_H 8#define __MESON_SCLK_DIV_H
9 9
10#include "clkc.h" 10#include <linux/clk-provider.h>
11 11#include "parm.h"
12struct meson_clk_triphase_data {
13 struct parm ph0;
14 struct parm ph1;
15 struct parm ph2;
16};
17 12
18struct meson_sclk_div_data { 13struct meson_sclk_div_data {
19 struct parm div; 14 struct parm div;
@@ -22,7 +17,6 @@ struct meson_sclk_div_data {
22 struct clk_duty cached_duty; 17 struct clk_duty cached_duty;
23}; 18};
24 19
25extern const struct clk_ops meson_clk_triphase_ops;
26extern const struct clk_ops meson_sclk_div_ops; 20extern const struct clk_ops meson_sclk_div_ops;
27 21
28#endif /* __MESON_CLKC_AUDIO_H */ 22#endif /* __MESON_SCLK_DIV_H */
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index 88af0e282ea0..08bcc01c0923 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -5,7 +5,10 @@
5 */ 5 */
6 6
7#include <linux/clk-provider.h> 7#include <linux/clk-provider.h>
8#include "clkc.h" 8#include <linux/module.h>
9
10#include "clk-regmap.h"
11#include "vid-pll-div.h"
9 12
10static inline struct meson_vid_pll_div_data * 13static inline struct meson_vid_pll_div_data *
11meson_vid_pll_div_data(struct clk_regmap *clk) 14meson_vid_pll_div_data(struct clk_regmap *clk)
@@ -89,3 +92,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
89const struct clk_ops meson_vid_pll_div_ro_ops = { 92const struct clk_ops meson_vid_pll_div_ro_ops = {
90 .recalc_rate = meson_vid_pll_div_recalc_rate, 93 .recalc_rate = meson_vid_pll_div_recalc_rate,
91}; 94};
95EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops);
96
97MODULE_DESCRIPTION("Amlogic video pll divider driver");
98MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
99MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/vid-pll-div.h b/drivers/clk/meson/vid-pll-div.h
new file mode 100644
index 000000000000..c0128e33ccf9
--- /dev/null
+++ b/drivers/clk/meson/vid-pll-div.h
@@ -0,0 +1,20 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_VID_PLL_DIV_H
8#define __MESON_VID_PLL_DIV_H
9
10#include <linux/clk-provider.h>
11#include "parm.h"
12
13struct meson_vid_pll_div_data {
14 struct parm val;
15 struct parm sel;
16};
17
18extern const struct clk_ops meson_vid_pll_div_ro_ops;
19
20#endif /* __MESON_VID_PLL_DIV_H */
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 61fefc046ec5..a60a1be937ad 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -53,7 +53,6 @@
53#define APMU_DISP1 0x110 53#define APMU_DISP1 0x110
54#define APMU_CCIC0 0x50 54#define APMU_CCIC0 0x50
55#define APMU_CCIC1 0xf4 55#define APMU_CCIC1 0xf4
56#define APMU_SP 0x68
57#define MPMU_UART_PLL 0x14 56#define MPMU_UART_PLL 0x14
58 57
59struct mmp2_clk_unit { 58struct mmp2_clk_unit {
@@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = {
210 .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32), 209 .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
211}; 210};
212 211
213static DEFINE_SPINLOCK(sp_lock);
214
215static struct mmp_param_mux_clk apmu_mux_clks[] = { 212static struct mmp_param_mux_clk apmu_mux_clks[] = {
216 {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock}, 213 {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
217 {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, 214 {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
@@ -232,9 +229,10 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
232 {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, 229 {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
233 {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, 230 {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
234 {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, 231 {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
235 {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock}, 232 {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock},
233 {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock},
236 {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, 234 {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
237 {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock}, 235 {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x09, 0x09, 0x0, 0, &disp1_lock},
238 {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock}, 236 {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
239 {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock}, 237 {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
240 {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock}, 238 {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
@@ -242,7 +240,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
242 {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, 240 {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
243 {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, 241 {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
244 {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, 242 {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
245 {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock},
246}; 243};
247 244
248static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) 245static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 7dedfaa6e152..5c6bbee396b3 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -175,8 +175,10 @@ static void __init a370_clk_init(struct device_node *np)
175 175
176 mvebu_coreclk_setup(np, &a370_coreclks); 176 mvebu_coreclk_setup(np, &a370_coreclks);
177 177
178 if (cgnp) 178 if (cgnp) {
179 mvebu_clk_gating_setup(cgnp, a370_gating_desc); 179 mvebu_clk_gating_setup(cgnp, a370_gating_desc);
180 of_node_put(cgnp);
181 }
180} 182}
181CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init); 183CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
182 184
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index e8f03293ec83..fa1568279c23 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -226,7 +226,9 @@ static void __init axp_clk_init(struct device_node *np)
226 226
227 mvebu_coreclk_setup(np, &axp_coreclks); 227 mvebu_coreclk_setup(np, &axp_coreclks);
228 228
229 if (cgnp) 229 if (cgnp) {
230 mvebu_clk_gating_setup(cgnp, axp_gating_desc); 230 mvebu_clk_gating_setup(cgnp, axp_gating_desc);
231 of_node_put(cgnp);
232 }
231} 233}
232CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init); 234CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index e0dd99f36bf4..0bd09d33f9cf 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -188,10 +188,14 @@ static void __init dove_clk_init(struct device_node *np)
188 188
189 mvebu_coreclk_setup(np, &dove_coreclks); 189 mvebu_coreclk_setup(np, &dove_coreclks);
190 190
191 if (ddnp) 191 if (ddnp) {
192 dove_divider_clk_init(ddnp); 192 dove_divider_clk_init(ddnp);
193 of_node_put(ddnp);
194 }
193 195
194 if (cgnp) 196 if (cgnp) {
195 mvebu_clk_gating_setup(cgnp, dove_gating_desc); 197 mvebu_clk_gating_setup(cgnp, dove_gating_desc);
198 of_node_put(cgnp);
199 }
196} 200}
197CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init); 201CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 6f784167bda4..35af3aa18f1c 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -331,6 +331,8 @@ static void __init kirkwood_clk_init(struct device_node *np)
331 if (cgnp) { 331 if (cgnp) {
332 mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc); 332 mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
333 kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc); 333 kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc);
334
335 of_node_put(cgnp);
334 } 336 }
335} 337}
336CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock", 338CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c
index 0a74cf7a7725..1c8ab4f834ba 100644
--- a/drivers/clk/mvebu/mv98dx3236.c
+++ b/drivers/clk/mvebu/mv98dx3236.c
@@ -172,7 +172,9 @@ static void __init mv98dx3236_clk_init(struct device_node *np)
172 172
173 mvebu_coreclk_setup(np, &mv98dx3236_core_clocks); 173 mvebu_coreclk_setup(np, &mv98dx3236_core_clocks);
174 174
175 if (cgnp) 175 if (cgnp) {
176 mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc); 176 mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc);
177 of_node_put(cgnp);
178 }
177} 179}
178CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init); 180CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1b1ba54e33dd..1c04575c118f 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -215,6 +215,7 @@ config MSM_MMCC_8996
215 215
216config MSM_GCC_8998 216config MSM_GCC_8998
217 tristate "MSM8998 Global Clock Controller" 217 tristate "MSM8998 Global Clock Controller"
218 select QCOM_GDSC
218 help 219 help
219 Support for the global clock controller on msm8998 devices. 220 Support for the global clock controller on msm8998 devices.
220 Say Y if you want to use peripheral devices such as UART, SPI, 221 Say Y if you want to use peripheral devices such as UART, SPI,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index e5eca8a1abe4..c25b57c3cbc8 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -71,7 +71,6 @@ struct src_sel {
71 * @freq_tbl: frequency table 71 * @freq_tbl: frequency table
72 * @clkr: regmap clock handle 72 * @clkr: regmap clock handle
73 * @lock: register lock 73 * @lock: register lock
74 *
75 */ 74 */
76struct clk_rcg { 75struct clk_rcg {
77 u32 ns_reg; 76 u32 ns_reg;
@@ -107,7 +106,6 @@ extern const struct clk_ops clk_rcg_lcc_ops;
107 * @freq_tbl: frequency table 106 * @freq_tbl: frequency table
108 * @clkr: regmap clock handle 107 * @clkr: regmap clock handle
109 * @lock: register lock 108 * @lock: register lock
110 *
111 */ 109 */
112struct clk_dyn_rcg { 110struct clk_dyn_rcg {
113 u32 ns_reg[2]; 111 u32 ns_reg[2];
@@ -140,7 +138,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
140 * @parent_map: map from software's parent index to hardware's src_sel field 138 * @parent_map: map from software's parent index to hardware's src_sel field
141 * @freq_tbl: frequency table 139 * @freq_tbl: frequency table
142 * @clkr: regmap clock handle 140 * @clkr: regmap clock handle
143 * 141 * @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG
144 */ 142 */
145struct clk_rcg2 { 143struct clk_rcg2 {
146 u32 cmd_rcgr; 144 u32 cmd_rcgr;
@@ -150,6 +148,7 @@ struct clk_rcg2 {
150 const struct parent_map *parent_map; 148 const struct parent_map *parent_map;
151 const struct freq_tbl *freq_tbl; 149 const struct freq_tbl *freq_tbl;
152 struct clk_regmap clkr; 150 struct clk_regmap clkr;
151 u8 cfg_off;
153}; 152};
154 153
155#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr) 154#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 6e3bd195d012..8c02bffe50df 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -41,6 +41,11 @@
41#define N_REG 0xc 41#define N_REG 0xc
42#define D_REG 0x10 42#define D_REG 0x10
43 43
44#define RCG_CFG_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
45#define RCG_M_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
46#define RCG_N_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
47#define RCG_D_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
48
44/* Dynamic Frequency Scaling */ 49/* Dynamic Frequency Scaling */
45#define MAX_PERF_LEVEL 8 50#define MAX_PERF_LEVEL 8
46#define SE_CMD_DFSR_OFFSET 0x14 51#define SE_CMD_DFSR_OFFSET 0x14
@@ -74,7 +79,7 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw)
74 u32 cfg; 79 u32 cfg;
75 int i, ret; 80 int i, ret;
76 81
77 ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg); 82 ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
78 if (ret) 83 if (ret)
79 goto err; 84 goto err;
80 85
@@ -123,7 +128,7 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
123 int ret; 128 int ret;
124 u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; 129 u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
125 130
126 ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, 131 ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
127 CFG_SRC_SEL_MASK, cfg); 132 CFG_SRC_SEL_MASK, cfg);
128 if (ret) 133 if (ret)
129 return ret; 134 return ret;
@@ -162,13 +167,13 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
162 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 167 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
163 u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask; 168 u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
164 169
165 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg); 170 regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
166 171
167 if (rcg->mnd_width) { 172 if (rcg->mnd_width) {
168 mask = BIT(rcg->mnd_width) - 1; 173 mask = BIT(rcg->mnd_width) - 1;
169 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m); 174 regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
170 m &= mask; 175 m &= mask;
171 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n); 176 regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
172 n = ~n; 177 n = ~n;
173 n &= mask; 178 n &= mask;
174 n += m; 179 n += m;
@@ -263,17 +268,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
263 if (rcg->mnd_width && f->n) { 268 if (rcg->mnd_width && f->n) {
264 mask = BIT(rcg->mnd_width) - 1; 269 mask = BIT(rcg->mnd_width) - 1;
265 ret = regmap_update_bits(rcg->clkr.regmap, 270 ret = regmap_update_bits(rcg->clkr.regmap,
266 rcg->cmd_rcgr + M_REG, mask, f->m); 271 RCG_M_OFFSET(rcg), mask, f->m);
267 if (ret) 272 if (ret)
268 return ret; 273 return ret;
269 274
270 ret = regmap_update_bits(rcg->clkr.regmap, 275 ret = regmap_update_bits(rcg->clkr.regmap,
271 rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m)); 276 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
272 if (ret) 277 if (ret)
273 return ret; 278 return ret;
274 279
275 ret = regmap_update_bits(rcg->clkr.regmap, 280 ret = regmap_update_bits(rcg->clkr.regmap,
276 rcg->cmd_rcgr + D_REG, mask, ~f->n); 281 RCG_D_OFFSET(rcg), mask, ~f->n);
277 if (ret) 282 if (ret)
278 return ret; 283 return ret;
279 } 284 }
@@ -284,8 +289,7 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
284 cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; 289 cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
285 if (rcg->mnd_width && f->n && (f->m != f->n)) 290 if (rcg->mnd_width && f->n && (f->m != f->n))
286 cfg |= CFG_MODE_DUAL_EDGE; 291 cfg |= CFG_MODE_DUAL_EDGE;
287 292 return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
288 return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
289 mask, cfg); 293 mask, cfg);
290} 294}
291 295
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 9f4fc7773fb2..c3fd632af119 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -18,6 +18,31 @@
18#define CLK_RPMH_ARC_EN_OFFSET 0 18#define CLK_RPMH_ARC_EN_OFFSET 0
19#define CLK_RPMH_VRM_EN_OFFSET 4 19#define CLK_RPMH_VRM_EN_OFFSET 4
20 20
21#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
22#define BCM_TCS_CMD_VALID_SHIFT 29
23#define BCM_TCS_CMD_VOTE_MASK 0x3fff
24#define BCM_TCS_CMD_VOTE_SHIFT 0
25
26#define BCM_TCS_CMD(valid, vote) \
27 (BCM_TCS_CMD_COMMIT_MASK | \
28 ((valid) << BCM_TCS_CMD_VALID_SHIFT) | \
29 ((vote & BCM_TCS_CMD_VOTE_MASK) \
30 << BCM_TCS_CMD_VOTE_SHIFT))
31
32/**
33 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM)
34 * @unit: divisor used to convert Hz value to an RPMh msg
35 * @width: multiplier used to convert Hz value to an RPMh msg
36 * @vcd: virtual clock domain that this bcm belongs to
37 * @reserved: reserved to pad the struct
38 */
39struct bcm_db {
40 __le32 unit;
41 __le16 width;
42 u8 vcd;
43 u8 reserved;
44};
45
21/** 46/**
22 * struct clk_rpmh - individual rpmh clock data structure 47 * struct clk_rpmh - individual rpmh clock data structure
23 * @hw: handle between common and hardware-specific interfaces 48 * @hw: handle between common and hardware-specific interfaces
@@ -29,6 +54,7 @@
29 * @aggr_state: rpmh clock aggregated state 54 * @aggr_state: rpmh clock aggregated state
30 * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh 55 * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh
31 * @valid_state_mask: mask to determine the state of the rpmh clock 56 * @valid_state_mask: mask to determine the state of the rpmh clock
57 * @unit: divisor to convert rate to rpmh msg in magnitudes of Khz
32 * @dev: device to which it is attached 58 * @dev: device to which it is attached
33 * @peer: pointer to the clock rpmh sibling 59 * @peer: pointer to the clock rpmh sibling
34 */ 60 */
@@ -42,6 +68,7 @@ struct clk_rpmh {
42 u32 aggr_state; 68 u32 aggr_state;
43 u32 last_sent_aggr_state; 69 u32 last_sent_aggr_state;
44 u32 valid_state_mask; 70 u32 valid_state_mask;
71 u32 unit;
45 struct device *dev; 72 struct device *dev;
46 struct clk_rpmh *peer; 73 struct clk_rpmh *peer;
47}; 74};
@@ -98,6 +125,17 @@ static DEFINE_MUTEX(rpmh_clk_lock);
98 __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \ 125 __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
99 CLK_RPMH_VRM_EN_OFFSET, 1, _div) 126 CLK_RPMH_VRM_EN_OFFSET, 1, _div)
100 127
128#define DEFINE_CLK_RPMH_BCM(_platform, _name, _res_name) \
129 static struct clk_rpmh _platform##_##_name = { \
130 .res_name = _res_name, \
131 .valid_state_mask = BIT(RPMH_ACTIVE_ONLY_STATE), \
132 .div = 1, \
133 .hw.init = &(struct clk_init_data){ \
134 .ops = &clk_rpmh_bcm_ops, \
135 .name = #_name, \
136 }, \
137 }
138
101static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw) 139static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw)
102{ 140{
103 return container_of(_hw, struct clk_rpmh, hw); 141 return container_of(_hw, struct clk_rpmh, hw);
@@ -210,6 +248,96 @@ static const struct clk_ops clk_rpmh_ops = {
210 .recalc_rate = clk_rpmh_recalc_rate, 248 .recalc_rate = clk_rpmh_recalc_rate,
211}; 249};
212 250
251static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
252{
253 struct tcs_cmd cmd = { 0 };
254 u32 cmd_state;
255 int ret;
256
257 mutex_lock(&rpmh_clk_lock);
258
259 cmd_state = 0;
260 if (enable) {
261 cmd_state = 1;
262 if (c->aggr_state)
263 cmd_state = c->aggr_state;
264 }
265
266 if (c->last_sent_aggr_state == cmd_state) {
267 mutex_unlock(&rpmh_clk_lock);
268 return 0;
269 }
270
271 cmd.addr = c->res_addr;
272 cmd.data = BCM_TCS_CMD(enable, cmd_state);
273
274 ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
275 if (ret) {
276 dev_err(c->dev, "set active state of %s failed: (%d)\n",
277 c->res_name, ret);
278 mutex_unlock(&rpmh_clk_lock);
279 return ret;
280 }
281
282 c->last_sent_aggr_state = cmd_state;
283
284 mutex_unlock(&rpmh_clk_lock);
285
286 return 0;
287}
288
289static int clk_rpmh_bcm_prepare(struct clk_hw *hw)
290{
291 struct clk_rpmh *c = to_clk_rpmh(hw);
292
293 return clk_rpmh_bcm_send_cmd(c, true);
294};
295
296static void clk_rpmh_bcm_unprepare(struct clk_hw *hw)
297{
298 struct clk_rpmh *c = to_clk_rpmh(hw);
299
300 clk_rpmh_bcm_send_cmd(c, false);
301};
302
303static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
304 unsigned long parent_rate)
305{
306 struct clk_rpmh *c = to_clk_rpmh(hw);
307
308 c->aggr_state = rate / c->unit;
309 /*
310 * Since any non-zero value sent to hw would result in enabling the
311 * clock, only send the value if the clock has already been prepared.
312 */
313 if (clk_hw_is_prepared(hw))
314 clk_rpmh_bcm_send_cmd(c, true);
315
316 return 0;
317};
318
319static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate,
320 unsigned long *parent_rate)
321{
322 return rate;
323}
324
325static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
326 unsigned long prate)
327{
328 struct clk_rpmh *c = to_clk_rpmh(hw);
329
330 return c->aggr_state * c->unit;
331}
332
333static const struct clk_ops clk_rpmh_bcm_ops = {
334 .prepare = clk_rpmh_bcm_prepare,
335 .unprepare = clk_rpmh_bcm_unprepare,
336 .set_rate = clk_rpmh_bcm_set_rate,
337 .round_rate = clk_rpmh_round_rate,
338 .recalc_rate = clk_rpmh_bcm_recalc_rate,
339};
340
213/* Resource name must match resource id present in cmd-db. */ 341/* Resource name must match resource id present in cmd-db. */
214DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2); 342DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
215DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2); 343DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
@@ -217,6 +345,7 @@ DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
217DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1); 345DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
218DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1); 346DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
219DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1); 347DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
348DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0");
220 349
221static struct clk_hw *sdm845_rpmh_clocks[] = { 350static struct clk_hw *sdm845_rpmh_clocks[] = {
222 [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw, 351 [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
@@ -231,6 +360,7 @@ static struct clk_hw *sdm845_rpmh_clocks[] = {
231 [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw, 360 [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
232 [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw, 361 [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
233 [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw, 362 [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
363 [RPMH_IPA_CLK] = &sdm845_ipa.hw,
234}; 364};
235 365
236static const struct clk_rpmh_desc clk_rpmh_sdm845 = { 366static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
@@ -267,6 +397,8 @@ static int clk_rpmh_probe(struct platform_device *pdev)
267 397
268 for (i = 0; i < desc->num_clks; i++) { 398 for (i = 0; i < desc->num_clks; i++) {
269 u32 res_addr; 399 u32 res_addr;
400 size_t aux_data_len;
401 const struct bcm_db *data;
270 402
271 rpmh_clk = to_clk_rpmh(hw_clks[i]); 403 rpmh_clk = to_clk_rpmh(hw_clks[i]);
272 res_addr = cmd_db_read_addr(rpmh_clk->res_name); 404 res_addr = cmd_db_read_addr(rpmh_clk->res_name);
@@ -275,6 +407,20 @@ static int clk_rpmh_probe(struct platform_device *pdev)
275 rpmh_clk->res_name); 407 rpmh_clk->res_name);
276 return -ENODEV; 408 return -ENODEV;
277 } 409 }
410
411 data = cmd_db_read_aux_data(rpmh_clk->res_name, &aux_data_len);
412 if (IS_ERR(data)) {
413 ret = PTR_ERR(data);
414 dev_err(&pdev->dev,
415 "error reading RPMh aux data for %s (%d)\n",
416 rpmh_clk->res_name, ret);
417 return ret;
418 }
419
420 /* Convert unit from Khz to Hz */
421 if (aux_data_len == sizeof(*data))
422 rpmh_clk->unit = le32_to_cpu(data->unit) * 1000ULL;
423
278 rpmh_clk->res_addr += res_addr; 424 rpmh_clk->res_addr += res_addr;
279 rpmh_clk->dev = &pdev->dev; 425 rpmh_clk->dev = &pdev->dev;
280 426
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index d3aadaeb2903..22dd42ad9223 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -655,10 +655,73 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
655 .num_clks = ARRAY_SIZE(qcs404_clks), 655 .num_clks = ARRAY_SIZE(qcs404_clks),
656}; 656};
657 657
658/* msm8998 */
659DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
660DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
661DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
662DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, div_clk1, div_clk1_a, 0xb);
663DEFINE_CLK_SMD_RPM(msm8998, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
664DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk1, ln_bb_clk1_a, 1);
665DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk2, ln_bb_clk2_a, 2);
666DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, ln_bb_clk3_pin, ln_bb_clk3_a_pin,
667 3);
668DEFINE_CLK_SMD_RPM(msm8998, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
669 QCOM_SMD_RPM_MMAXI_CLK, 0);
670DEFINE_CLK_SMD_RPM(msm8998, aggre1_noc_clk, aggre1_noc_a_clk,
671 QCOM_SMD_RPM_AGGR_CLK, 1);
672DEFINE_CLK_SMD_RPM(msm8998, aggre2_noc_clk, aggre2_noc_a_clk,
673 QCOM_SMD_RPM_AGGR_CLK, 2);
674DEFINE_CLK_SMD_RPM_QDSS(msm8998, qdss_clk, qdss_a_clk,
675 QCOM_SMD_RPM_MISC_CLK, 1);
676DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk1, rf_clk1_a, 4);
677DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
678DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
679DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
680static struct clk_smd_rpm *msm8998_clks[] = {
681 [RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
682 [RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk,
683 [RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk,
684 [RPM_SMD_CNOC_A_CLK] = &msm8998_cnoc_a_clk,
685 [RPM_SMD_CE1_CLK] = &msm8998_ce1_clk,
686 [RPM_SMD_CE1_A_CLK] = &msm8998_ce1_a_clk,
687 [RPM_SMD_DIV_CLK1] = &msm8998_div_clk1,
688 [RPM_SMD_DIV_A_CLK1] = &msm8998_div_clk1_a,
689 [RPM_SMD_IPA_CLK] = &msm8998_ipa_clk,
690 [RPM_SMD_IPA_A_CLK] = &msm8998_ipa_a_clk,
691 [RPM_SMD_LN_BB_CLK1] = &msm8998_ln_bb_clk1,
692 [RPM_SMD_LN_BB_CLK1_A] = &msm8998_ln_bb_clk1_a,
693 [RPM_SMD_LN_BB_CLK2] = &msm8998_ln_bb_clk2,
694 [RPM_SMD_LN_BB_CLK2_A] = &msm8998_ln_bb_clk2_a,
695 [RPM_SMD_LN_BB_CLK3_PIN] = &msm8998_ln_bb_clk3_pin,
696 [RPM_SMD_LN_BB_CLK3_A_PIN] = &msm8998_ln_bb_clk3_a_pin,
697 [RPM_SMD_MMAXI_CLK] = &msm8998_mmssnoc_axi_rpm_clk,
698 [RPM_SMD_MMAXI_A_CLK] = &msm8998_mmssnoc_axi_rpm_a_clk,
699 [RPM_SMD_AGGR1_NOC_CLK] = &msm8998_aggre1_noc_clk,
700 [RPM_SMD_AGGR1_NOC_A_CLK] = &msm8998_aggre1_noc_a_clk,
701 [RPM_SMD_AGGR2_NOC_CLK] = &msm8998_aggre2_noc_clk,
702 [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8998_aggre2_noc_a_clk,
703 [RPM_SMD_QDSS_CLK] = &msm8998_qdss_clk,
704 [RPM_SMD_QDSS_A_CLK] = &msm8998_qdss_a_clk,
705 [RPM_SMD_RF_CLK1] = &msm8998_rf_clk1,
706 [RPM_SMD_RF_CLK1_A] = &msm8998_rf_clk1_a,
707 [RPM_SMD_RF_CLK2_PIN] = &msm8998_rf_clk2_pin,
708 [RPM_SMD_RF_CLK2_A_PIN] = &msm8998_rf_clk2_a_pin,
709 [RPM_SMD_RF_CLK3] = &msm8998_rf_clk3,
710 [RPM_SMD_RF_CLK3_A] = &msm8998_rf_clk3_a,
711 [RPM_SMD_RF_CLK3_PIN] = &msm8998_rf_clk3_pin,
712 [RPM_SMD_RF_CLK3_A_PIN] = &msm8998_rf_clk3_a_pin,
713};
714
715static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
716 .clks = msm8998_clks,
717 .num_clks = ARRAY_SIZE(msm8998_clks),
718};
719
658static const struct of_device_id rpm_smd_clk_match_table[] = { 720static const struct of_device_id rpm_smd_clk_match_table[] = {
659 { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, 721 { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
660 { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, 722 { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
661 { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, 723 { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
724 { .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
662 { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 }, 725 { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
663 { } 726 { }
664}; 727};
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 0a48ed56833b..a6b2f86112d8 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -231,6 +231,8 @@ int qcom_cc_really_probe(struct platform_device *pdev,
231 struct gdsc_desc *scd; 231 struct gdsc_desc *scd;
232 size_t num_clks = desc->num_clks; 232 size_t num_clks = desc->num_clks;
233 struct clk_regmap **rclks = desc->clks; 233 struct clk_regmap **rclks = desc->clks;
234 size_t num_clk_hws = desc->num_clk_hws;
235 struct clk_hw **clk_hws = desc->clk_hws;
234 236
235 cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL); 237 cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
236 if (!cc) 238 if (!cc)
@@ -269,6 +271,12 @@ int qcom_cc_really_probe(struct platform_device *pdev,
269 271
270 qcom_cc_drop_protected(dev, cc); 272 qcom_cc_drop_protected(dev, cc);
271 273
274 for (i = 0; i < num_clk_hws; i++) {
275 ret = devm_clk_hw_register(dev, clk_hws[i]);
276 if (ret)
277 return ret;
278 }
279
272 for (i = 0; i < num_clks; i++) { 280 for (i = 0; i < num_clks; i++) {
273 if (!rclks[i]) 281 if (!rclks[i])
274 continue; 282 continue;
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 4aa33ee70bae..1e2a8bdac55a 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -27,6 +27,8 @@ struct qcom_cc_desc {
27 size_t num_resets; 27 size_t num_resets;
28 struct gdsc **gdscs; 28 struct gdsc **gdscs;
29 size_t num_gdscs; 29 size_t num_gdscs;
30 struct clk_hw **clk_hws;
31 size_t num_clk_hws;
30}; 32};
31 33
32/** 34/**
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 505c6263141d..0e32892b438c 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -4715,18 +4715,12 @@ static const struct qcom_cc_desc gcc_ipq8074_desc = {
4715 .num_clks = ARRAY_SIZE(gcc_ipq8074_clks), 4715 .num_clks = ARRAY_SIZE(gcc_ipq8074_clks),
4716 .resets = gcc_ipq8074_resets, 4716 .resets = gcc_ipq8074_resets,
4717 .num_resets = ARRAY_SIZE(gcc_ipq8074_resets), 4717 .num_resets = ARRAY_SIZE(gcc_ipq8074_resets),
4718 .clk_hws = gcc_ipq8074_hws,
4719 .num_clk_hws = ARRAY_SIZE(gcc_ipq8074_hws),
4718}; 4720};
4719 4721
4720static int gcc_ipq8074_probe(struct platform_device *pdev) 4722static int gcc_ipq8074_probe(struct platform_device *pdev)
4721{ 4723{
4722 int ret, i;
4723
4724 for (i = 0; i < ARRAY_SIZE(gcc_ipq8074_hws); i++) {
4725 ret = devm_clk_hw_register(&pdev->dev, gcc_ipq8074_hws[i]);
4726 if (ret)
4727 return ret;
4728 }
4729
4730 return qcom_cc_probe(pdev, &gcc_ipq8074_desc); 4724 return qcom_cc_probe(pdev, &gcc_ipq8074_desc);
4731} 4725}
4732 4726
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index 849046fbed6d..8c6d93144b9c 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -1702,6 +1702,8 @@ static const struct qcom_cc_desc gcc_mdm9615_desc = {
1702 .num_clks = ARRAY_SIZE(gcc_mdm9615_clks), 1702 .num_clks = ARRAY_SIZE(gcc_mdm9615_clks),
1703 .resets = gcc_mdm9615_resets, 1703 .resets = gcc_mdm9615_resets,
1704 .num_resets = ARRAY_SIZE(gcc_mdm9615_resets), 1704 .num_resets = ARRAY_SIZE(gcc_mdm9615_resets),
1705 .clk_hws = gcc_mdm9615_hws,
1706 .num_clk_hws = ARRAY_SIZE(gcc_mdm9615_hws),
1705}; 1707};
1706 1708
1707static const struct of_device_id gcc_mdm9615_match_table[] = { 1709static const struct of_device_id gcc_mdm9615_match_table[] = {
@@ -1712,21 +1714,12 @@ MODULE_DEVICE_TABLE(of, gcc_mdm9615_match_table);
1712 1714
1713static int gcc_mdm9615_probe(struct platform_device *pdev) 1715static int gcc_mdm9615_probe(struct platform_device *pdev)
1714{ 1716{
1715 struct device *dev = &pdev->dev;
1716 struct regmap *regmap; 1717 struct regmap *regmap;
1717 int ret;
1718 int i;
1719 1718
1720 regmap = qcom_cc_map(pdev, &gcc_mdm9615_desc); 1719 regmap = qcom_cc_map(pdev, &gcc_mdm9615_desc);
1721 if (IS_ERR(regmap)) 1720 if (IS_ERR(regmap))
1722 return PTR_ERR(regmap); 1721 return PTR_ERR(regmap);
1723 1722
1724 for (i = 0; i < ARRAY_SIZE(gcc_mdm9615_hws); i++) {
1725 ret = devm_clk_hw_register(dev, gcc_mdm9615_hws[i]);
1726 if (ret)
1727 return ret;
1728 }
1729
1730 return qcom_cc_really_probe(pdev, &gcc_mdm9615_desc, regmap); 1723 return qcom_cc_really_probe(pdev, &gcc_mdm9615_desc, regmap);
1731} 1724}
1732 1725
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9d136172c27c..4632b9272b7f 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -3656,6 +3656,8 @@ static const struct qcom_cc_desc gcc_msm8996_desc = {
3656 .num_resets = ARRAY_SIZE(gcc_msm8996_resets), 3656 .num_resets = ARRAY_SIZE(gcc_msm8996_resets),
3657 .gdscs = gcc_msm8996_gdscs, 3657 .gdscs = gcc_msm8996_gdscs,
3658 .num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs), 3658 .num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs),
3659 .clk_hws = gcc_msm8996_hws,
3660 .num_clk_hws = ARRAY_SIZE(gcc_msm8996_hws),
3659}; 3661};
3660 3662
3661static const struct of_device_id gcc_msm8996_match_table[] = { 3663static const struct of_device_id gcc_msm8996_match_table[] = {
@@ -3666,8 +3668,6 @@ MODULE_DEVICE_TABLE(of, gcc_msm8996_match_table);
3666 3668
3667static int gcc_msm8996_probe(struct platform_device *pdev) 3669static int gcc_msm8996_probe(struct platform_device *pdev)
3668{ 3670{
3669 struct device *dev = &pdev->dev;
3670 int i, ret;
3671 struct regmap *regmap; 3671 struct regmap *regmap;
3672 3672
3673 regmap = qcom_cc_map(pdev, &gcc_msm8996_desc); 3673 regmap = qcom_cc_map(pdev, &gcc_msm8996_desc);
@@ -3680,12 +3680,6 @@ static int gcc_msm8996_probe(struct platform_device *pdev)
3680 */ 3680 */
3681 regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21)); 3681 regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
3682 3682
3683 for (i = 0; i < ARRAY_SIZE(gcc_msm8996_hws); i++) {
3684 ret = devm_clk_hw_register(dev, gcc_msm8996_hws[i]);
3685 if (ret)
3686 return ret;
3687 }
3688
3689 return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap); 3683 return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap);
3690} 3684}
3691 3685
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 1b779396e04f..c240fba794c7 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1112,6 +1112,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
1112 1112
1113static const struct freq_tbl ftbl_usb30_master_clk_src[] = { 1113static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
1114 F(19200000, P_XO, 1, 0, 0), 1114 F(19200000, P_XO, 1, 0, 0),
1115 F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
1115 F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0), 1116 F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
1116 F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), 1117 F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
1117 { } 1118 { }
@@ -1189,6 +1190,7 @@ static struct clk_branch gcc_aggre1_ufs_axi_clk = {
1189 "ufs_axi_clk_src", 1190 "ufs_axi_clk_src",
1190 }, 1191 },
1191 .num_parents = 1, 1192 .num_parents = 1,
1193 .flags = CLK_SET_RATE_PARENT,
1192 .ops = &clk_branch2_ops, 1194 .ops = &clk_branch2_ops,
1193 }, 1195 },
1194 }, 1196 },
@@ -1206,6 +1208,7 @@ static struct clk_branch gcc_aggre1_usb3_axi_clk = {
1206 "usb30_master_clk_src", 1208 "usb30_master_clk_src",
1207 }, 1209 },
1208 .num_parents = 1, 1210 .num_parents = 1,
1211 .flags = CLK_SET_RATE_PARENT,
1209 .ops = &clk_branch2_ops, 1212 .ops = &clk_branch2_ops,
1210 }, 1213 },
1211 }, 1214 },
@@ -1288,6 +1291,7 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
1288 "blsp1_qup1_i2c_apps_clk_src", 1291 "blsp1_qup1_i2c_apps_clk_src",
1289 }, 1292 },
1290 .num_parents = 1, 1293 .num_parents = 1,
1294 .flags = CLK_SET_RATE_PARENT,
1291 .ops = &clk_branch2_ops, 1295 .ops = &clk_branch2_ops,
1292 }, 1296 },
1293 }, 1297 },
@@ -1305,6 +1309,7 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
1305 "blsp1_qup1_spi_apps_clk_src", 1309 "blsp1_qup1_spi_apps_clk_src",
1306 }, 1310 },
1307 .num_parents = 1, 1311 .num_parents = 1,
1312 .flags = CLK_SET_RATE_PARENT,
1308 .ops = &clk_branch2_ops, 1313 .ops = &clk_branch2_ops,
1309 }, 1314 },
1310 }, 1315 },
@@ -1322,6 +1327,7 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
1322 "blsp1_qup2_i2c_apps_clk_src", 1327 "blsp1_qup2_i2c_apps_clk_src",
1323 }, 1328 },
1324 .num_parents = 1, 1329 .num_parents = 1,
1330 .flags = CLK_SET_RATE_PARENT,
1325 .ops = &clk_branch2_ops, 1331 .ops = &clk_branch2_ops,
1326 }, 1332 },
1327 }, 1333 },
@@ -1339,6 +1345,7 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
1339 "blsp1_qup2_spi_apps_clk_src", 1345 "blsp1_qup2_spi_apps_clk_src",
1340 }, 1346 },
1341 .num_parents = 1, 1347 .num_parents = 1,
1348 .flags = CLK_SET_RATE_PARENT,
1342 .ops = &clk_branch2_ops, 1349 .ops = &clk_branch2_ops,
1343 }, 1350 },
1344 }, 1351 },
@@ -1356,6 +1363,7 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
1356 "blsp1_qup3_i2c_apps_clk_src", 1363 "blsp1_qup3_i2c_apps_clk_src",
1357 }, 1364 },
1358 .num_parents = 1, 1365 .num_parents = 1,
1366 .flags = CLK_SET_RATE_PARENT,
1359 .ops = &clk_branch2_ops, 1367 .ops = &clk_branch2_ops,
1360 }, 1368 },
1361 }, 1369 },
@@ -1373,6 +1381,7 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
1373 "blsp1_qup3_spi_apps_clk_src", 1381 "blsp1_qup3_spi_apps_clk_src",
1374 }, 1382 },
1375 .num_parents = 1, 1383 .num_parents = 1,
1384 .flags = CLK_SET_RATE_PARENT,
1376 .ops = &clk_branch2_ops, 1385 .ops = &clk_branch2_ops,
1377 }, 1386 },
1378 }, 1387 },
@@ -1390,6 +1399,7 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
1390 "blsp1_qup4_i2c_apps_clk_src", 1399 "blsp1_qup4_i2c_apps_clk_src",
1391 }, 1400 },
1392 .num_parents = 1, 1401 .num_parents = 1,
1402 .flags = CLK_SET_RATE_PARENT,
1393 .ops = &clk_branch2_ops, 1403 .ops = &clk_branch2_ops,
1394 }, 1404 },
1395 }, 1405 },
@@ -1407,6 +1417,7 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
1407 "blsp1_qup4_spi_apps_clk_src", 1417 "blsp1_qup4_spi_apps_clk_src",
1408 }, 1418 },
1409 .num_parents = 1, 1419 .num_parents = 1,
1420 .flags = CLK_SET_RATE_PARENT,
1410 .ops = &clk_branch2_ops, 1421 .ops = &clk_branch2_ops,
1411 }, 1422 },
1412 }, 1423 },
@@ -1424,6 +1435,7 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
1424 "blsp1_qup5_i2c_apps_clk_src", 1435 "blsp1_qup5_i2c_apps_clk_src",
1425 }, 1436 },
1426 .num_parents = 1, 1437 .num_parents = 1,
1438 .flags = CLK_SET_RATE_PARENT,
1427 .ops = &clk_branch2_ops, 1439 .ops = &clk_branch2_ops,
1428 }, 1440 },
1429 }, 1441 },
@@ -1441,6 +1453,7 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
1441 "blsp1_qup5_spi_apps_clk_src", 1453 "blsp1_qup5_spi_apps_clk_src",
1442 }, 1454 },
1443 .num_parents = 1, 1455 .num_parents = 1,
1456 .flags = CLK_SET_RATE_PARENT,
1444 .ops = &clk_branch2_ops, 1457 .ops = &clk_branch2_ops,
1445 }, 1458 },
1446 }, 1459 },
@@ -1458,6 +1471,7 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
1458 "blsp1_qup6_i2c_apps_clk_src", 1471 "blsp1_qup6_i2c_apps_clk_src",
1459 }, 1472 },
1460 .num_parents = 1, 1473 .num_parents = 1,
1474 .flags = CLK_SET_RATE_PARENT,
1461 .ops = &clk_branch2_ops, 1475 .ops = &clk_branch2_ops,
1462 }, 1476 },
1463 }, 1477 },
@@ -1475,6 +1489,7 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
1475 "blsp1_qup6_spi_apps_clk_src", 1489 "blsp1_qup6_spi_apps_clk_src",
1476 }, 1490 },
1477 .num_parents = 1, 1491 .num_parents = 1,
1492 .flags = CLK_SET_RATE_PARENT,
1478 .ops = &clk_branch2_ops, 1493 .ops = &clk_branch2_ops,
1479 }, 1494 },
1480 }, 1495 },
@@ -1505,6 +1520,7 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
1505 "blsp1_uart1_apps_clk_src", 1520 "blsp1_uart1_apps_clk_src",
1506 }, 1521 },
1507 .num_parents = 1, 1522 .num_parents = 1,
1523 .flags = CLK_SET_RATE_PARENT,
1508 .ops = &clk_branch2_ops, 1524 .ops = &clk_branch2_ops,
1509 }, 1525 },
1510 }, 1526 },
@@ -1522,6 +1538,7 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
1522 "blsp1_uart2_apps_clk_src", 1538 "blsp1_uart2_apps_clk_src",
1523 }, 1539 },
1524 .num_parents = 1, 1540 .num_parents = 1,
1541 .flags = CLK_SET_RATE_PARENT,
1525 .ops = &clk_branch2_ops, 1542 .ops = &clk_branch2_ops,
1526 }, 1543 },
1527 }, 1544 },
@@ -1539,6 +1556,7 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
1539 "blsp1_uart3_apps_clk_src", 1556 "blsp1_uart3_apps_clk_src",
1540 }, 1557 },
1541 .num_parents = 1, 1558 .num_parents = 1,
1559 .flags = CLK_SET_RATE_PARENT,
1542 .ops = &clk_branch2_ops, 1560 .ops = &clk_branch2_ops,
1543 }, 1561 },
1544 }, 1562 },
@@ -1569,6 +1587,7 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
1569 "blsp2_qup1_i2c_apps_clk_src", 1587 "blsp2_qup1_i2c_apps_clk_src",
1570 }, 1588 },
1571 .num_parents = 1, 1589 .num_parents = 1,
1590 .flags = CLK_SET_RATE_PARENT,
1572 .ops = &clk_branch2_ops, 1591 .ops = &clk_branch2_ops,
1573 }, 1592 },
1574 }, 1593 },
@@ -1586,6 +1605,7 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
1586 "blsp2_qup1_spi_apps_clk_src", 1605 "blsp2_qup1_spi_apps_clk_src",
1587 }, 1606 },
1588 .num_parents = 1, 1607 .num_parents = 1,
1608 .flags = CLK_SET_RATE_PARENT,
1589 .ops = &clk_branch2_ops, 1609 .ops = &clk_branch2_ops,
1590 }, 1610 },
1591 }, 1611 },
@@ -1603,6 +1623,7 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
1603 "blsp2_qup2_i2c_apps_clk_src", 1623 "blsp2_qup2_i2c_apps_clk_src",
1604 }, 1624 },
1605 .num_parents = 1, 1625 .num_parents = 1,
1626 .flags = CLK_SET_RATE_PARENT,
1606 .ops = &clk_branch2_ops, 1627 .ops = &clk_branch2_ops,
1607 }, 1628 },
1608 }, 1629 },
@@ -1620,6 +1641,7 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
1620 "blsp2_qup2_spi_apps_clk_src", 1641 "blsp2_qup2_spi_apps_clk_src",
1621 }, 1642 },
1622 .num_parents = 1, 1643 .num_parents = 1,
1644 .flags = CLK_SET_RATE_PARENT,
1623 .ops = &clk_branch2_ops, 1645 .ops = &clk_branch2_ops,
1624 }, 1646 },
1625 }, 1647 },
@@ -1637,6 +1659,7 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
1637 "blsp2_qup3_i2c_apps_clk_src", 1659 "blsp2_qup3_i2c_apps_clk_src",
1638 }, 1660 },
1639 .num_parents = 1, 1661 .num_parents = 1,
1662 .flags = CLK_SET_RATE_PARENT,
1640 .ops = &clk_branch2_ops, 1663 .ops = &clk_branch2_ops,
1641 }, 1664 },
1642 }, 1665 },
@@ -1654,6 +1677,7 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
1654 "blsp2_qup3_spi_apps_clk_src", 1677 "blsp2_qup3_spi_apps_clk_src",
1655 }, 1678 },
1656 .num_parents = 1, 1679 .num_parents = 1,
1680 .flags = CLK_SET_RATE_PARENT,
1657 .ops = &clk_branch2_ops, 1681 .ops = &clk_branch2_ops,
1658 }, 1682 },
1659 }, 1683 },
@@ -1671,6 +1695,7 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
1671 "blsp2_qup4_i2c_apps_clk_src", 1695 "blsp2_qup4_i2c_apps_clk_src",
1672 }, 1696 },
1673 .num_parents = 1, 1697 .num_parents = 1,
1698 .flags = CLK_SET_RATE_PARENT,
1674 .ops = &clk_branch2_ops, 1699 .ops = &clk_branch2_ops,
1675 }, 1700 },
1676 }, 1701 },
@@ -1688,6 +1713,7 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
1688 "blsp2_qup4_spi_apps_clk_src", 1713 "blsp2_qup4_spi_apps_clk_src",
1689 }, 1714 },
1690 .num_parents = 1, 1715 .num_parents = 1,
1716 .flags = CLK_SET_RATE_PARENT,
1691 .ops = &clk_branch2_ops, 1717 .ops = &clk_branch2_ops,
1692 }, 1718 },
1693 }, 1719 },
@@ -1705,6 +1731,7 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
1705 "blsp2_qup5_i2c_apps_clk_src", 1731 "blsp2_qup5_i2c_apps_clk_src",
1706 }, 1732 },
1707 .num_parents = 1, 1733 .num_parents = 1,
1734 .flags = CLK_SET_RATE_PARENT,
1708 .ops = &clk_branch2_ops, 1735 .ops = &clk_branch2_ops,
1709 }, 1736 },
1710 }, 1737 },
@@ -1722,6 +1749,7 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
1722 "blsp2_qup5_spi_apps_clk_src", 1749 "blsp2_qup5_spi_apps_clk_src",
1723 }, 1750 },
1724 .num_parents = 1, 1751 .num_parents = 1,
1752 .flags = CLK_SET_RATE_PARENT,
1725 .ops = &clk_branch2_ops, 1753 .ops = &clk_branch2_ops,
1726 }, 1754 },
1727 }, 1755 },
@@ -1739,6 +1767,7 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
1739 "blsp2_qup6_i2c_apps_clk_src", 1767 "blsp2_qup6_i2c_apps_clk_src",
1740 }, 1768 },
1741 .num_parents = 1, 1769 .num_parents = 1,
1770 .flags = CLK_SET_RATE_PARENT,
1742 .ops = &clk_branch2_ops, 1771 .ops = &clk_branch2_ops,
1743 }, 1772 },
1744 }, 1773 },
@@ -1756,6 +1785,7 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
1756 "blsp2_qup6_spi_apps_clk_src", 1785 "blsp2_qup6_spi_apps_clk_src",
1757 }, 1786 },
1758 .num_parents = 1, 1787 .num_parents = 1,
1788 .flags = CLK_SET_RATE_PARENT,
1759 .ops = &clk_branch2_ops, 1789 .ops = &clk_branch2_ops,
1760 }, 1790 },
1761 }, 1791 },
@@ -1786,6 +1816,7 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
1786 "blsp2_uart1_apps_clk_src", 1816 "blsp2_uart1_apps_clk_src",
1787 }, 1817 },
1788 .num_parents = 1, 1818 .num_parents = 1,
1819 .flags = CLK_SET_RATE_PARENT,
1789 .ops = &clk_branch2_ops, 1820 .ops = &clk_branch2_ops,
1790 }, 1821 },
1791 }, 1822 },
@@ -1803,6 +1834,7 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
1803 "blsp2_uart2_apps_clk_src", 1834 "blsp2_uart2_apps_clk_src",
1804 }, 1835 },
1805 .num_parents = 1, 1836 .num_parents = 1,
1837 .flags = CLK_SET_RATE_PARENT,
1806 .ops = &clk_branch2_ops, 1838 .ops = &clk_branch2_ops,
1807 }, 1839 },
1808 }, 1840 },
@@ -1820,6 +1852,7 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = {
1820 "blsp2_uart3_apps_clk_src", 1852 "blsp2_uart3_apps_clk_src",
1821 }, 1853 },
1822 .num_parents = 1, 1854 .num_parents = 1,
1855 .flags = CLK_SET_RATE_PARENT,
1823 .ops = &clk_branch2_ops, 1856 .ops = &clk_branch2_ops,
1824 }, 1857 },
1825 }, 1858 },
@@ -1837,6 +1870,7 @@ static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
1837 "usb30_master_clk_src", 1870 "usb30_master_clk_src",
1838 }, 1871 },
1839 .num_parents = 1, 1872 .num_parents = 1,
1873 .flags = CLK_SET_RATE_PARENT,
1840 .ops = &clk_branch2_ops, 1874 .ops = &clk_branch2_ops,
1841 }, 1875 },
1842 }, 1876 },
@@ -1854,6 +1888,7 @@ static struct clk_branch gcc_gp1_clk = {
1854 "gp1_clk_src", 1888 "gp1_clk_src",
1855 }, 1889 },
1856 .num_parents = 1, 1890 .num_parents = 1,
1891 .flags = CLK_SET_RATE_PARENT,
1857 .ops = &clk_branch2_ops, 1892 .ops = &clk_branch2_ops,
1858 }, 1893 },
1859 }, 1894 },
@@ -1871,6 +1906,7 @@ static struct clk_branch gcc_gp2_clk = {
1871 "gp2_clk_src", 1906 "gp2_clk_src",
1872 }, 1907 },
1873 .num_parents = 1, 1908 .num_parents = 1,
1909 .flags = CLK_SET_RATE_PARENT,
1874 .ops = &clk_branch2_ops, 1910 .ops = &clk_branch2_ops,
1875 }, 1911 },
1876 }, 1912 },
@@ -1888,6 +1924,7 @@ static struct clk_branch gcc_gp3_clk = {
1888 "gp3_clk_src", 1924 "gp3_clk_src",
1889 }, 1925 },
1890 .num_parents = 1, 1926 .num_parents = 1,
1927 .flags = CLK_SET_RATE_PARENT,
1891 .ops = &clk_branch2_ops, 1928 .ops = &clk_branch2_ops,
1892 }, 1929 },
1893 }, 1930 },
@@ -1957,6 +1994,7 @@ static struct clk_branch gcc_hmss_ahb_clk = {
1957 "hmss_ahb_clk_src", 1994 "hmss_ahb_clk_src",
1958 }, 1995 },
1959 .num_parents = 1, 1996 .num_parents = 1,
1997 .flags = CLK_SET_RATE_PARENT,
1960 .ops = &clk_branch2_ops, 1998 .ops = &clk_branch2_ops,
1961 }, 1999 },
1962 }, 2000 },
@@ -1987,6 +2025,7 @@ static struct clk_branch gcc_hmss_rbcpr_clk = {
1987 "hmss_rbcpr_clk_src", 2025 "hmss_rbcpr_clk_src",
1988 }, 2026 },
1989 .num_parents = 1, 2027 .num_parents = 1,
2028 .flags = CLK_SET_RATE_PARENT,
1990 .ops = &clk_branch2_ops, 2029 .ops = &clk_branch2_ops,
1991 }, 2030 },
1992 }, 2031 },
@@ -2088,6 +2127,7 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
2088 "pcie_aux_clk_src", 2127 "pcie_aux_clk_src",
2089 }, 2128 },
2090 .num_parents = 1, 2129 .num_parents = 1,
2130 .flags = CLK_SET_RATE_PARENT,
2091 .ops = &clk_branch2_ops, 2131 .ops = &clk_branch2_ops,
2092 }, 2132 },
2093 }, 2133 },
@@ -2157,6 +2197,7 @@ static struct clk_branch gcc_pcie_phy_aux_clk = {
2157 "pcie_aux_clk_src", 2197 "pcie_aux_clk_src",
2158 }, 2198 },
2159 .num_parents = 1, 2199 .num_parents = 1,
2200 .flags = CLK_SET_RATE_PARENT,
2160 .ops = &clk_branch2_ops, 2201 .ops = &clk_branch2_ops,
2161 }, 2202 },
2162 }, 2203 },
@@ -2174,6 +2215,7 @@ static struct clk_branch gcc_pdm2_clk = {
2174 "pdm2_clk_src", 2215 "pdm2_clk_src",
2175 }, 2216 },
2176 .num_parents = 1, 2217 .num_parents = 1,
2218 .flags = CLK_SET_RATE_PARENT,
2177 .ops = &clk_branch2_ops, 2219 .ops = &clk_branch2_ops,
2178 }, 2220 },
2179 }, 2221 },
@@ -2243,6 +2285,7 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
2243 "sdcc2_apps_clk_src", 2285 "sdcc2_apps_clk_src",
2244 }, 2286 },
2245 .num_parents = 1, 2287 .num_parents = 1,
2288 .flags = CLK_SET_RATE_PARENT,
2246 .ops = &clk_branch2_ops, 2289 .ops = &clk_branch2_ops,
2247 }, 2290 },
2248 }, 2291 },
@@ -2273,6 +2316,7 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
2273 "sdcc4_apps_clk_src", 2316 "sdcc4_apps_clk_src",
2274 }, 2317 },
2275 .num_parents = 1, 2318 .num_parents = 1,
2319 .flags = CLK_SET_RATE_PARENT,
2276 .ops = &clk_branch2_ops, 2320 .ops = &clk_branch2_ops,
2277 }, 2321 },
2278 }, 2322 },
@@ -2316,6 +2360,7 @@ static struct clk_branch gcc_tsif_ref_clk = {
2316 "tsif_ref_clk_src", 2360 "tsif_ref_clk_src",
2317 }, 2361 },
2318 .num_parents = 1, 2362 .num_parents = 1,
2363 .flags = CLK_SET_RATE_PARENT,
2319 .ops = &clk_branch2_ops, 2364 .ops = &clk_branch2_ops,
2320 }, 2365 },
2321 }, 2366 },
@@ -2346,6 +2391,7 @@ static struct clk_branch gcc_ufs_axi_clk = {
2346 "ufs_axi_clk_src", 2391 "ufs_axi_clk_src",
2347 }, 2392 },
2348 .num_parents = 1, 2393 .num_parents = 1,
2394 .flags = CLK_SET_RATE_PARENT,
2349 .ops = &clk_branch2_ops, 2395 .ops = &clk_branch2_ops,
2350 }, 2396 },
2351 }, 2397 },
@@ -2441,6 +2487,7 @@ static struct clk_branch gcc_usb30_master_clk = {
2441 "usb30_master_clk_src", 2487 "usb30_master_clk_src",
2442 }, 2488 },
2443 .num_parents = 1, 2489 .num_parents = 1,
2490 .flags = CLK_SET_RATE_PARENT,
2444 .ops = &clk_branch2_ops, 2491 .ops = &clk_branch2_ops,
2445 }, 2492 },
2446 }, 2493 },
@@ -2458,6 +2505,7 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
2458 "usb30_mock_utmi_clk_src", 2505 "usb30_mock_utmi_clk_src",
2459 }, 2506 },
2460 .num_parents = 1, 2507 .num_parents = 1,
2508 .flags = CLK_SET_RATE_PARENT,
2461 .ops = &clk_branch2_ops, 2509 .ops = &clk_branch2_ops,
2462 }, 2510 },
2463 }, 2511 },
@@ -2488,6 +2536,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
2488 "usb3_phy_aux_clk_src", 2536 "usb3_phy_aux_clk_src",
2489 }, 2537 },
2490 .num_parents = 1, 2538 .num_parents = 1,
2539 .flags = CLK_SET_RATE_PARENT,
2491 .ops = &clk_branch2_ops, 2540 .ops = &clk_branch2_ops,
2492 }, 2541 },
2493 }, 2542 },
@@ -2495,7 +2544,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
2495 2544
2496static struct clk_branch gcc_usb3_phy_pipe_clk = { 2545static struct clk_branch gcc_usb3_phy_pipe_clk = {
2497 .halt_reg = 0x50004, 2546 .halt_reg = 0x50004,
2498 .halt_check = BRANCH_HALT, 2547 .halt_check = BRANCH_HALT_SKIP,
2499 .clkr = { 2548 .clkr = {
2500 .enable_reg = 0x50004, 2549 .enable_reg = 0x50004,
2501 .enable_mask = BIT(0), 2550 .enable_mask = BIT(0),
@@ -2910,6 +2959,10 @@ static const struct regmap_config gcc_msm8998_regmap_config = {
2910 .fast_io = true, 2959 .fast_io = true,
2911}; 2960};
2912 2961
2962static struct clk_hw *gcc_msm8998_hws[] = {
2963 &xo.hw,
2964};
2965
2913static const struct qcom_cc_desc gcc_msm8998_desc = { 2966static const struct qcom_cc_desc gcc_msm8998_desc = {
2914 .config = &gcc_msm8998_regmap_config, 2967 .config = &gcc_msm8998_regmap_config,
2915 .clks = gcc_msm8998_clocks, 2968 .clks = gcc_msm8998_clocks,
@@ -2918,6 +2971,8 @@ static const struct qcom_cc_desc gcc_msm8998_desc = {
2918 .num_resets = ARRAY_SIZE(gcc_msm8998_resets), 2971 .num_resets = ARRAY_SIZE(gcc_msm8998_resets),
2919 .gdscs = gcc_msm8998_gdscs, 2972 .gdscs = gcc_msm8998_gdscs,
2920 .num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs), 2973 .num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs),
2974 .clk_hws = gcc_msm8998_hws,
2975 .num_clk_hws = ARRAY_SIZE(gcc_msm8998_hws),
2921}; 2976};
2922 2977
2923static int gcc_msm8998_probe(struct platform_device *pdev) 2978static int gcc_msm8998_probe(struct platform_device *pdev)
@@ -2937,10 +2992,6 @@ static int gcc_msm8998_probe(struct platform_device *pdev)
2937 if (ret) 2992 if (ret)
2938 return ret; 2993 return ret;
2939 2994
2940 ret = devm_clk_hw_register(&pdev->dev, &xo.hw);
2941 if (ret)
2942 return ret;
2943
2944 return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap); 2995 return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap);
2945} 2996}
2946 2997
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 64da032bb9ed..5a62f64ada93 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -678,6 +678,7 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
678 .cmd_rcgr = 0x4014, 678 .cmd_rcgr = 0x4014,
679 .mnd_width = 16, 679 .mnd_width = 16,
680 .hid_width = 5, 680 .hid_width = 5,
681 .cfg_off = 0x20,
681 .parent_map = gcc_parent_map_0, 682 .parent_map = gcc_parent_map_0,
682 .freq_tbl = ftbl_blsp1_uart0_apps_clk_src, 683 .freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
683 .clkr.hw.init = &(struct clk_init_data){ 684 .clkr.hw.init = &(struct clk_init_data){
@@ -2692,6 +2693,8 @@ static const struct qcom_cc_desc gcc_qcs404_desc = {
2692 .num_clks = ARRAY_SIZE(gcc_qcs404_clocks), 2693 .num_clks = ARRAY_SIZE(gcc_qcs404_clocks),
2693 .resets = gcc_qcs404_resets, 2694 .resets = gcc_qcs404_resets,
2694 .num_resets = ARRAY_SIZE(gcc_qcs404_resets), 2695 .num_resets = ARRAY_SIZE(gcc_qcs404_resets),
2696 .clk_hws = gcc_qcs404_hws,
2697 .num_clk_hws = ARRAY_SIZE(gcc_qcs404_hws),
2695}; 2698};
2696 2699
2697static const struct of_device_id gcc_qcs404_match_table[] = { 2700static const struct of_device_id gcc_qcs404_match_table[] = {
@@ -2703,7 +2706,6 @@ MODULE_DEVICE_TABLE(of, gcc_qcs404_match_table);
2703static int gcc_qcs404_probe(struct platform_device *pdev) 2706static int gcc_qcs404_probe(struct platform_device *pdev)
2704{ 2707{
2705 struct regmap *regmap; 2708 struct regmap *regmap;
2706 int ret, i;
2707 2709
2708 regmap = qcom_cc_map(pdev, &gcc_qcs404_desc); 2710 regmap = qcom_cc_map(pdev, &gcc_qcs404_desc);
2709 if (IS_ERR(regmap)) 2711 if (IS_ERR(regmap))
@@ -2711,12 +2713,6 @@ static int gcc_qcs404_probe(struct platform_device *pdev)
2711 2713
2712 clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config); 2714 clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config);
2713 2715
2714 for (i = 0; i < ARRAY_SIZE(gcc_qcs404_hws); i++) {
2715 ret = devm_clk_hw_register(&pdev->dev, gcc_qcs404_hws[i]);
2716 if (ret)
2717 return ret;
2718 }
2719
2720 return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap); 2716 return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap);
2721} 2717}
2722 2718
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index ba239ea4c842..8827db23066f 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -2420,6 +2420,8 @@ static const struct qcom_cc_desc gcc_sdm660_desc = {
2420 .num_resets = ARRAY_SIZE(gcc_sdm660_resets), 2420 .num_resets = ARRAY_SIZE(gcc_sdm660_resets),
2421 .gdscs = gcc_sdm660_gdscs, 2421 .gdscs = gcc_sdm660_gdscs,
2422 .num_gdscs = ARRAY_SIZE(gcc_sdm660_gdscs), 2422 .num_gdscs = ARRAY_SIZE(gcc_sdm660_gdscs),
2423 .clk_hws = gcc_sdm660_hws,
2424 .num_clk_hws = ARRAY_SIZE(gcc_sdm660_hws),
2423}; 2425};
2424 2426
2425static const struct of_device_id gcc_sdm660_match_table[] = { 2427static const struct of_device_id gcc_sdm660_match_table[] = {
@@ -2431,7 +2433,7 @@ MODULE_DEVICE_TABLE(of, gcc_sdm660_match_table);
2431 2433
2432static int gcc_sdm660_probe(struct platform_device *pdev) 2434static int gcc_sdm660_probe(struct platform_device *pdev)
2433{ 2435{
2434 int i, ret; 2436 int ret;
2435 struct regmap *regmap; 2437 struct regmap *regmap;
2436 2438
2437 regmap = qcom_cc_map(pdev, &gcc_sdm660_desc); 2439 regmap = qcom_cc_map(pdev, &gcc_sdm660_desc);
@@ -2446,13 +2448,6 @@ static int gcc_sdm660_probe(struct platform_device *pdev)
2446 if (ret) 2448 if (ret)
2447 return ret; 2449 return ret;
2448 2450
2449 /* Register the hws */
2450 for (i = 0; i < ARRAY_SIZE(gcc_sdm660_hws); i++) {
2451 ret = devm_clk_hw_register(&pdev->dev, gcc_sdm660_hws[i]);
2452 if (ret)
2453 return ret;
2454 }
2455
2456 return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap); 2451 return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap);
2457} 2452}
2458 2453
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index c782e62dd98b..58fa5c247af1 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = {
115 "core_bi_pll_test_se", 115 "core_bi_pll_test_se",
116}; 116};
117 117
118static const char * const gcc_parent_names_7[] = { 118static const char * const gcc_parent_names_7_ao[] = {
119 "bi_tcxo", 119 "bi_tcxo_ao",
120 "gpll0", 120 "gpll0",
121 "gpll0_out_even", 121 "gpll0_out_even",
122 "core_bi_pll_test_se", 122 "core_bi_pll_test_se",
@@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = {
128 "core_bi_pll_test_se", 128 "core_bi_pll_test_se",
129}; 129};
130 130
131static const char * const gcc_parent_names_8_ao[] = {
132 "bi_tcxo_ao",
133 "gpll0",
134 "core_bi_pll_test_se",
135};
136
131static const struct parent_map gcc_parent_map_10[] = { 137static const struct parent_map gcc_parent_map_10[] = {
132 { P_BI_TCXO, 0 }, 138 { P_BI_TCXO, 0 },
133 { P_GPLL0_OUT_MAIN, 1 }, 139 { P_GPLL0_OUT_MAIN, 1 },
@@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
210 .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, 216 .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
211 .clkr.hw.init = &(struct clk_init_data){ 217 .clkr.hw.init = &(struct clk_init_data){
212 .name = "gcc_cpuss_ahb_clk_src", 218 .name = "gcc_cpuss_ahb_clk_src",
213 .parent_names = gcc_parent_names_7, 219 .parent_names = gcc_parent_names_7_ao,
214 .num_parents = 4, 220 .num_parents = 4,
215 .ops = &clk_rcg2_ops, 221 .ops = &clk_rcg2_ops,
216 }, 222 },
@@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
229 .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, 235 .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
230 .clkr.hw.init = &(struct clk_init_data){ 236 .clkr.hw.init = &(struct clk_init_data){
231 .name = "gcc_cpuss_rbcpr_clk_src", 237 .name = "gcc_cpuss_rbcpr_clk_src",
232 .parent_names = gcc_parent_names_8, 238 .parent_names = gcc_parent_names_8_ao,
233 .num_parents = 3, 239 .num_parents = 3,
234 .ops = &clk_rcg2_ops, 240 .ops = &clk_rcg2_ops,
235 }, 241 },
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 7d4ee109435c..7235510eac94 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -3347,6 +3347,8 @@ static const struct qcom_cc_desc mmcc_msm8996_desc = {
3347 .num_resets = ARRAY_SIZE(mmcc_msm8996_resets), 3347 .num_resets = ARRAY_SIZE(mmcc_msm8996_resets),
3348 .gdscs = mmcc_msm8996_gdscs, 3348 .gdscs = mmcc_msm8996_gdscs,
3349 .num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs), 3349 .num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs),
3350 .clk_hws = mmcc_msm8996_hws,
3351 .num_clk_hws = ARRAY_SIZE(mmcc_msm8996_hws),
3350}; 3352};
3351 3353
3352static const struct of_device_id mmcc_msm8996_match_table[] = { 3354static const struct of_device_id mmcc_msm8996_match_table[] = {
@@ -3357,8 +3359,6 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8996_match_table);
3357 3359
3358static int mmcc_msm8996_probe(struct platform_device *pdev) 3360static int mmcc_msm8996_probe(struct platform_device *pdev)
3359{ 3361{
3360 struct device *dev = &pdev->dev;
3361 int i, ret;
3362 struct regmap *regmap; 3362 struct regmap *regmap;
3363 3363
3364 regmap = qcom_cc_map(pdev, &mmcc_msm8996_desc); 3364 regmap = qcom_cc_map(pdev, &mmcc_msm8996_desc);
@@ -3370,12 +3370,6 @@ static int mmcc_msm8996_probe(struct platform_device *pdev)
3370 /* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */ 3370 /* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */
3371 regmap_update_bits(regmap, 0x5054, BIT(15), 0); 3371 regmap_update_bits(regmap, 0x5054, BIT(15), 0);
3372 3372
3373 for (i = 0; i < ARRAY_SIZE(mmcc_msm8996_hws); i++) {
3374 ret = devm_clk_hw_register(dev, mmcc_msm8996_hws[i]);
3375 if (ret)
3376 return ret;
3377 }
3378
3379 return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap); 3373 return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap);
3380} 3374}
3381 3375
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 10e852518870..4d92b27a6153 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -21,7 +21,7 @@
21 21
22enum clk_ids { 22enum clk_ids {
23 /* Core Clock Outputs exported to DT */ 23 /* Core Clock Outputs exported to DT */
24 LAST_DT_CORE_CLK = R8A774A1_CLK_OSC, 24 LAST_DT_CORE_CLK = R8A774A1_CLK_CANFD,
25 25
26 /* External Input Clocks */ 26 /* External Input Clocks */
27 CLK_EXTAL, 27 CLK_EXTAL,
@@ -102,6 +102,7 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
102 DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1), 102 DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1),
103 DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1), 103 DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1),
104 104
105 DEF_DIV6P1("canfd", R8A774A1_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
105 DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), 106 DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
106 DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014), 107 DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014),
107 DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250), 108 DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
@@ -191,6 +192,7 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
191 DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4), 192 DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4),
192 DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4), 193 DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4),
193 DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4), 194 DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4),
195 DEF_MOD("can-fd", 914, R8A774A1_CLK_S3D2),
194 DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4), 196 DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4),
195 DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4), 197 DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4),
196 DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6), 198 DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 10b96895d452..34e274f2a273 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -22,7 +22,7 @@
22 22
23enum clk_ids { 23enum clk_ids {
24 /* Core Clock Outputs exported to DT */ 24 /* Core Clock Outputs exported to DT */
25 LAST_DT_CORE_CLK = R8A774C0_CLK_CPEX, 25 LAST_DT_CORE_CLK = R8A774C0_CLK_CANFD,
26 26
27 /* External Input Clocks */ 27 /* External Input Clocks */
28 CLK_EXTAL, 28 CLK_EXTAL,
@@ -33,6 +33,7 @@ enum clk_ids {
33 CLK_PLL1, 33 CLK_PLL1,
34 CLK_PLL3, 34 CLK_PLL3,
35 CLK_PLL0D4, 35 CLK_PLL0D4,
36 CLK_PLL0D6,
36 CLK_PLL0D8, 37 CLK_PLL0D8,
37 CLK_PLL0D20, 38 CLK_PLL0D20,
38 CLK_PLL0D24, 39 CLK_PLL0D24,
@@ -61,6 +62,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
61 62
62 DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100), 63 DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100),
63 DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1), 64 DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1),
65 DEF_FIXED(".pll0d6", CLK_PLL0D6, CLK_PLL0, 6, 1),
64 DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1), 66 DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1),
65 DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1), 67 DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1),
66 DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1), 68 DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1),
@@ -112,6 +114,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
112 DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2), 114 DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
113 DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4), 115 DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
114 116
117 DEF_DIV6P1("canfd", R8A774C0_CLK_CANFD, CLK_PLL0D6, 0x244),
115 DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c), 118 DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c),
116 DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014), 119 DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014),
117 120
@@ -119,6 +122,11 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
119}; 122};
120 123
121static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { 124static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
125 DEF_MOD("tmu4", 121, R8A774C0_CLK_S0D6C),
126 DEF_MOD("tmu3", 122, R8A774C0_CLK_S3D2C),
127 DEF_MOD("tmu2", 123, R8A774C0_CLK_S3D2C),
128 DEF_MOD("tmu1", 124, R8A774C0_CLK_S3D2C),
129 DEF_MOD("tmu0", 125, R8A774C0_CLK_CP),
122 DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C), 130 DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C),
123 DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C), 131 DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C),
124 DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C), 132 DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C),
@@ -172,8 +180,8 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
172 DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4), 180 DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4),
173 DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4), 181 DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4),
174 DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0), 182 DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0),
175 DEF_MOD("du1", 723, R8A774C0_CLK_S2D1), 183 DEF_MOD("du1", 723, R8A774C0_CLK_S1D1),
176 DEF_MOD("du0", 724, R8A774C0_CLK_S2D1), 184 DEF_MOD("du0", 724, R8A774C0_CLK_S1D1),
177 DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1), 185 DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1),
178 186
179 DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2), 187 DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2),
@@ -187,6 +195,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
187 DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4), 195 DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4),
188 DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4), 196 DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4),
189 DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4), 197 DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4),
198 DEF_MOD("can-fd", 914, R8A774C0_CLK_S3D2),
190 DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4), 199 DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4),
191 DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4), 200 DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4),
192 DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2), 201 DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index 25a3083b6764..f9e07fcc0d96 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -41,6 +41,7 @@ enum clk_ids {
41 CLK_S2, 41 CLK_S2,
42 CLK_S3, 42 CLK_S3,
43 CLK_SDSRC, 43 CLK_SDSRC,
44 CLK_RPCSRC,
44 CLK_OCO, 45 CLK_OCO,
45 46
46 /* Module Clocks */ 47 /* Module Clocks */
@@ -65,8 +66,14 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
65 DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), 66 DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
66 DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), 67 DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
67 DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), 68 DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
69 DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
68 DEF_RATE(".oco", CLK_OCO, 32768), 70 DEF_RATE(".oco", CLK_OCO, 32768),
69 71
72 DEF_BASE("rpc", R8A77980_CLK_RPC, CLK_TYPE_GEN3_RPC,
73 CLK_RPCSRC),
74 DEF_BASE("rpcd2", R8A77980_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
75 R8A77980_CLK_RPC),
76
70 /* Core Clock Outputs */ 77 /* Core Clock Outputs */
71 DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), 78 DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
72 DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), 79 DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
@@ -164,6 +171,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
164 DEF_MOD("gpio1", 911, R8A77980_CLK_CP), 171 DEF_MOD("gpio1", 911, R8A77980_CLK_CP),
165 DEF_MOD("gpio0", 912, R8A77980_CLK_CP), 172 DEF_MOD("gpio0", 912, R8A77980_CLK_CP),
166 DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2), 173 DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2),
174 DEF_MOD("rpc-if", 917, R8A77980_CLK_RPC),
167 DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6), 175 DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6),
168 DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6), 176 DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6),
169 DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2), 177 DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2),
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index be2ccbd6d623..9a8071a8114d 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -30,6 +30,21 @@
30 30
31#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */ 31#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */
32 32
33static spinlock_t cpg_lock;
34
35static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
36{
37 unsigned long flags;
38 u32 val;
39
40 spin_lock_irqsave(&cpg_lock, flags);
41 val = readl(reg);
42 val &= ~clear;
43 val |= set;
44 writel(val, reg);
45 spin_unlock_irqrestore(&cpg_lock, flags);
46};
47
33struct cpg_simple_notifier { 48struct cpg_simple_notifier {
34 struct notifier_block nb; 49 struct notifier_block nb;
35 void __iomem *reg; 50 void __iomem *reg;
@@ -118,7 +133,6 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
118 struct cpg_z_clk *zclk = to_z_clk(hw); 133 struct cpg_z_clk *zclk = to_z_clk(hw);
119 unsigned int mult; 134 unsigned int mult;
120 unsigned int i; 135 unsigned int i;
121 u32 val, kick;
122 136
123 /* Factor of 2 is for fixed divider */ 137 /* Factor of 2 is for fixed divider */
124 mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate); 138 mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate);
@@ -127,17 +141,14 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
127 if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK) 141 if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
128 return -EBUSY; 142 return -EBUSY;
129 143
130 val = readl(zclk->reg) & ~zclk->mask; 144 cpg_reg_modify(zclk->reg, zclk->mask,
131 val |= ((32 - mult) << __ffs(zclk->mask)) & zclk->mask; 145 ((32 - mult) << __ffs(zclk->mask)) & zclk->mask);
132 writel(val, zclk->reg);
133 146
134 /* 147 /*
135 * Set KICK bit in FRQCRB to update hardware setting and wait for 148 * Set KICK bit in FRQCRB to update hardware setting and wait for
136 * clock change completion. 149 * clock change completion.
137 */ 150 */
138 kick = readl(zclk->kick_reg); 151 cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
139 kick |= CPG_FRQCRB_KICK;
140 writel(kick, zclk->kick_reg);
141 152
142 /* 153 /*
143 * Note: There is no HW information about the worst case latency. 154 * Note: There is no HW information about the worst case latency.
@@ -266,12 +277,10 @@ static const struct sd_div_table cpg_sd_div_table[] = {
266static int cpg_sd_clock_enable(struct clk_hw *hw) 277static int cpg_sd_clock_enable(struct clk_hw *hw)
267{ 278{
268 struct sd_clock *clock = to_sd_clock(hw); 279 struct sd_clock *clock = to_sd_clock(hw);
269 u32 val = readl(clock->csn.reg);
270
271 val &= ~(CPG_SD_STP_MASK);
272 val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK;
273 280
274 writel(val, clock->csn.reg); 281 cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
282 clock->div_table[clock->cur_div_idx].val &
283 CPG_SD_STP_MASK);
275 284
276 return 0; 285 return 0;
277} 286}
@@ -280,7 +289,7 @@ static void cpg_sd_clock_disable(struct clk_hw *hw)
280{ 289{
281 struct sd_clock *clock = to_sd_clock(hw); 290 struct sd_clock *clock = to_sd_clock(hw);
282 291
283 writel(readl(clock->csn.reg) | CPG_SD_STP_MASK, clock->csn.reg); 292 cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
284} 293}
285 294
286static int cpg_sd_clock_is_enabled(struct clk_hw *hw) 295static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
@@ -327,7 +336,6 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
327{ 336{
328 struct sd_clock *clock = to_sd_clock(hw); 337 struct sd_clock *clock = to_sd_clock(hw);
329 unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate); 338 unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
330 u32 val;
331 unsigned int i; 339 unsigned int i;
332 340
333 for (i = 0; i < clock->div_num; i++) 341 for (i = 0; i < clock->div_num; i++)
@@ -339,10 +347,9 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
339 347
340 clock->cur_div_idx = i; 348 clock->cur_div_idx = i;
341 349
342 val = readl(clock->csn.reg); 350 cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
343 val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK); 351 clock->div_table[i].val &
344 val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK); 352 (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
345 writel(val, clock->csn.reg);
346 353
347 return 0; 354 return 0;
348} 355}
@@ -415,6 +422,92 @@ free_clock:
415 return clk; 422 return clk;
416} 423}
417 424
425struct rpc_clock {
426 struct clk_divider div;
427 struct clk_gate gate;
428 /*
429 * One notifier covers both RPC and RPCD2 clocks as they are both
430 * controlled by the same RPCCKCR register...
431 */
432 struct cpg_simple_notifier csn;
433};
434
435static const struct clk_div_table cpg_rpcsrc_div_table[] = {
436 { 2, 5 }, { 3, 6 }, { 0, 0 },
437};
438
439static const struct clk_div_table cpg_rpc_div_table[] = {
440 { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
441};
442
443static struct clk * __init cpg_rpc_clk_register(const char *name,
444 void __iomem *base, const char *parent_name,
445 struct raw_notifier_head *notifiers)
446{
447 struct rpc_clock *rpc;
448 struct clk *clk;
449
450 rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
451 if (!rpc)
452 return ERR_PTR(-ENOMEM);
453
454 rpc->div.reg = base + CPG_RPCCKCR;
455 rpc->div.width = 3;
456 rpc->div.table = cpg_rpc_div_table;
457 rpc->div.lock = &cpg_lock;
458
459 rpc->gate.reg = base + CPG_RPCCKCR;
460 rpc->gate.bit_idx = 8;
461 rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
462 rpc->gate.lock = &cpg_lock;
463
464 rpc->csn.reg = base + CPG_RPCCKCR;
465
466 clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
467 &rpc->div.hw, &clk_divider_ops,
468 &rpc->gate.hw, &clk_gate_ops, 0);
469 if (IS_ERR(clk)) {
470 kfree(rpc);
471 return clk;
472 }
473
474 cpg_simple_notifier_register(notifiers, &rpc->csn);
475 return clk;
476}
477
478struct rpcd2_clock {
479 struct clk_fixed_factor fixed;
480 struct clk_gate gate;
481};
482
483static struct clk * __init cpg_rpcd2_clk_register(const char *name,
484 void __iomem *base,
485 const char *parent_name)
486{
487 struct rpcd2_clock *rpcd2;
488 struct clk *clk;
489
490 rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
491 if (!rpcd2)
492 return ERR_PTR(-ENOMEM);
493
494 rpcd2->fixed.mult = 1;
495 rpcd2->fixed.div = 2;
496
497 rpcd2->gate.reg = base + CPG_RPCCKCR;
498 rpcd2->gate.bit_idx = 9;
499 rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
500 rpcd2->gate.lock = &cpg_lock;
501
502 clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
503 &rpcd2->fixed.hw, &clk_fixed_factor_ops,
504 &rpcd2->gate.hw, &clk_gate_ops, 0);
505 if (IS_ERR(clk))
506 kfree(rpcd2);
507
508 return clk;
509}
510
418 511
419static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata; 512static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
420static unsigned int cpg_clk_extalr __initdata; 513static unsigned int cpg_clk_extalr __initdata;
@@ -593,6 +686,21 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
593 } 686 }
594 break; 687 break;
595 688
689 case CLK_TYPE_GEN3_RPCSRC:
690 return clk_register_divider_table(NULL, core->name,
691 __clk_get_name(parent), 0,
692 base + CPG_RPCCKCR, 3, 2, 0,
693 cpg_rpcsrc_div_table,
694 &cpg_lock);
695
696 case CLK_TYPE_GEN3_RPC:
697 return cpg_rpc_clk_register(core->name, base,
698 __clk_get_name(parent), notifiers);
699
700 case CLK_TYPE_GEN3_RPCD2:
701 return cpg_rpcd2_clk_register(core->name, base,
702 __clk_get_name(parent));
703
596 default: 704 default:
597 return ERR_PTR(-EINVAL); 705 return ERR_PTR(-EINVAL);
598 } 706 }
@@ -613,5 +721,8 @@ int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
613 if (attr) 721 if (attr)
614 cpg_quirks = (uintptr_t)attr->data; 722 cpg_quirks = (uintptr_t)attr->data;
615 pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks); 723 pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks);
724
725 spin_lock_init(&cpg_lock);
726
616 return 0; 727 return 0;
617} 728}
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index f4fb6cf16688..eac1b057455a 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -23,6 +23,9 @@ enum rcar_gen3_clk_types {
23 CLK_TYPE_GEN3_Z2, 23 CLK_TYPE_GEN3_Z2,
24 CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */ 24 CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
25 CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */ 25 CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
26 CLK_TYPE_GEN3_RPCSRC,
27 CLK_TYPE_GEN3_RPC,
28 CLK_TYPE_GEN3_RPCD2,
26 29
27 /* SoC specific definitions start here */ 30 /* SoC specific definitions start here */
28 CLK_TYPE_GEN3_SOC_BASE, 31 CLK_TYPE_GEN3_SOC_BASE,
@@ -57,6 +60,7 @@ struct rcar_gen3_cpg_pll_config {
57 u8 osc_prediv; 60 u8 osc_prediv;
58}; 61};
59 62
63#define CPG_RPCCKCR 0x238
60#define CPG_RCKCR 0x240 64#define CPG_RCKCR 0x240
61 65
62struct clk *rcar_gen3_cpg_clk_register(struct device *dev, 66struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 59d4d46667ce..54066e6508d3 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1028,6 +1028,7 @@ static unsigned long __init exynos4_get_xom(void)
1028 xom = readl(chipid_base + 8); 1028 xom = readl(chipid_base + 8);
1029 1029
1030 iounmap(chipid_base); 1030 iounmap(chipid_base);
1031 of_node_put(np);
1031 } 1032 }
1032 1033
1033 return xom; 1034 return xom;
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 93306283d764..8ae44b5db4c2 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent,
136{ 136{
137 struct of_phandle_args genpdspec = { .np = pd_node }; 137 struct of_phandle_args genpdspec = { .np = pd_node };
138 struct platform_device *pdev; 138 struct platform_device *pdev;
139 int ret;
140
141 pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO);
142 if (!pdev)
143 return -ENOMEM;
139 144
140 pdev = platform_device_alloc(info->pd_name, -1);
141 pdev->dev.parent = parent; 145 pdev->dev.parent = parent;
142 pdev->driver_override = "exynos5-subcmu";
143 platform_set_drvdata(pdev, (void *)info); 146 platform_set_drvdata(pdev, (void *)info);
144 of_genpd_add_device(&genpdspec, &pdev->dev); 147 of_genpd_add_device(&genpdspec, &pdev->dev);
145 platform_device_add(pdev); 148 ret = platform_device_add(pdev);
149 if (ret)
150 platform_device_put(pdev);
146 151
147 return 0; 152 return ret;
148} 153}
149 154
150static int __init exynos5_clk_probe(struct platform_device *pdev) 155static int __init exynos5_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 751e2c4fb65b..dae1c96de933 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -559,7 +559,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
559 /* ENABLE_ACLK_TOP */ 559 /* ENABLE_ACLK_TOP */
560 GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400", 560 GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400",
561 ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0), 561 ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0),
562 GATE(CLK_ACLK_IMEM_SSX_266, "aclk_imem_ssx_266", 562 GATE(CLK_ACLK_IMEM_SSSX_266, "aclk_imem_sssx_266",
563 "div_aclk_imem_sssx_266", ENABLE_ACLK_TOP, 563 "div_aclk_imem_sssx_266", ENABLE_ACLK_TOP,
564 29, CLK_IGNORE_UNUSED, 0), 564 29, CLK_IGNORE_UNUSED, 0),
565 GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400", 565 GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400",
@@ -568,10 +568,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
568 GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400", 568 GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400",
569 ENABLE_ACLK_TOP, 25, 569 ENABLE_ACLK_TOP, 25,
570 CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), 570 CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
571 GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_266", 571 GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_200",
572 ENABLE_ACLK_TOP, 24, 572 ENABLE_ACLK_TOP, 24,
573 CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), 573 CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
574 GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_200", 574 GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_266",
575 ENABLE_ACLK_TOP, 23, 575 ENABLE_ACLK_TOP, 23,
576 CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), 576 CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
577 GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b", 577 GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b",
@@ -5467,6 +5467,35 @@ static const struct samsung_cmu_info cam1_cmu_info __initconst = {
5467 .clk_name = "aclk_cam1_400", 5467 .clk_name = "aclk_cam1_400",
5468}; 5468};
5469 5469
5470/*
5471 * Register offset definitions for CMU_IMEM
5472 */
5473#define ENABLE_ACLK_IMEM_SLIMSSS 0x080c
5474#define ENABLE_PCLK_IMEM_SLIMSSS 0x0908
5475
5476static const unsigned long imem_clk_regs[] __initconst = {
5477 ENABLE_ACLK_IMEM_SLIMSSS,
5478 ENABLE_PCLK_IMEM_SLIMSSS,
5479};
5480
5481static const struct samsung_gate_clock imem_gate_clks[] __initconst = {
5482 /* ENABLE_ACLK_IMEM_SLIMSSS */
5483 GATE(CLK_ACLK_SLIMSSS, "aclk_slimsss", "aclk_imem_sssx_266",
5484 ENABLE_ACLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0),
5485
5486 /* ENABLE_PCLK_IMEM_SLIMSSS */
5487 GATE(CLK_PCLK_SLIMSSS, "pclk_slimsss", "aclk_imem_200",
5488 ENABLE_PCLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0),
5489};
5490
5491static const struct samsung_cmu_info imem_cmu_info __initconst = {
5492 .gate_clks = imem_gate_clks,
5493 .nr_gate_clks = ARRAY_SIZE(imem_gate_clks),
5494 .nr_clk_ids = IMEM_NR_CLK,
5495 .clk_regs = imem_clk_regs,
5496 .nr_clk_regs = ARRAY_SIZE(imem_clk_regs),
5497 .clk_name = "aclk_imem_200",
5498};
5470 5499
5471struct exynos5433_cmu_data { 5500struct exynos5433_cmu_data {
5472 struct samsung_clk_reg_dump *clk_save; 5501 struct samsung_clk_reg_dump *clk_save;
@@ -5655,6 +5684,9 @@ static const struct of_device_id exynos5433_cmu_of_match[] = {
5655 .compatible = "samsung,exynos5433-cmu-mscl", 5684 .compatible = "samsung,exynos5433-cmu-mscl",
5656 .data = &mscl_cmu_info, 5685 .data = &mscl_cmu_info,
5657 }, { 5686 }, {
5687 .compatible = "samsung,exynos5433-cmu-imem",
5688 .data = &imem_cmu_info,
5689 }, {
5658 }, 5690 },
5659}; 5691};
5660 5692
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 884067e4f1a1..f38f0e24e3b6 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -389,7 +389,7 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
389 ARRAY_SIZE(s3c2450_gates)); 389 ARRAY_SIZE(s3c2450_gates));
390 samsung_clk_register_alias(ctx, s3c2450_aliases, 390 samsung_clk_register_alias(ctx, s3c2450_aliases,
391 ARRAY_SIZE(s3c2450_aliases)); 391 ARRAY_SIZE(s3c2450_aliases));
392 /* fall through, as s3c2450 extends the s3c2416 clocks */ 392 /* fall through - as s3c2450 extends the s3c2416 clocks */
393 case S3C2416: 393 case S3C2416:
394 samsung_clk_register_div(ctx, s3c2416_dividers, 394 samsung_clk_register_div(ctx, s3c2416_dividers,
395 ARRAY_SIZE(s3c2416_dividers)); 395 ARRAY_SIZE(s3c2416_dividers));
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index aa7a6e6a15b6..73e03328d5c5 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -176,8 +176,7 @@ static struct clk_ops gateclk_ops = {
176 .set_parent = socfpga_clk_set_parent, 176 .set_parent = socfpga_clk_set_parent,
177}; 177};
178 178
179static void __init __socfpga_gate_init(struct device_node *node, 179void __init socfpga_gate_init(struct device_node *node)
180 const struct clk_ops *ops)
181{ 180{
182 u32 clk_gate[2]; 181 u32 clk_gate[2];
183 u32 div_reg[3]; 182 u32 div_reg[3];
@@ -188,12 +187,17 @@ static void __init __socfpga_gate_init(struct device_node *node,
188 const char *clk_name = node->name; 187 const char *clk_name = node->name;
189 const char *parent_name[SOCFPGA_MAX_PARENTS]; 188 const char *parent_name[SOCFPGA_MAX_PARENTS];
190 struct clk_init_data init; 189 struct clk_init_data init;
190 struct clk_ops *ops;
191 int rc; 191 int rc;
192 192
193 socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); 193 socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
194 if (WARN_ON(!socfpga_clk)) 194 if (WARN_ON(!socfpga_clk))
195 return; 195 return;
196 196
197 ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL);
198 if (WARN_ON(!ops))
199 return;
200
197 rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); 201 rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
198 if (rc) 202 if (rc)
199 clk_gate[0] = 0; 203 clk_gate[0] = 0;
@@ -202,8 +206,8 @@ static void __init __socfpga_gate_init(struct device_node *node,
202 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0]; 206 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
203 socfpga_clk->hw.bit_idx = clk_gate[1]; 207 socfpga_clk->hw.bit_idx = clk_gate[1];
204 208
205 gateclk_ops.enable = clk_gate_ops.enable; 209 ops->enable = clk_gate_ops.enable;
206 gateclk_ops.disable = clk_gate_ops.disable; 210 ops->disable = clk_gate_ops.disable;
207 } 211 }
208 212
209 rc = of_property_read_u32(node, "fixed-divider", &fixed_div); 213 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
@@ -234,6 +238,11 @@ static void __init __socfpga_gate_init(struct device_node *node,
234 init.flags = 0; 238 init.flags = 0;
235 239
236 init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS); 240 init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
241 if (init.num_parents < 2) {
242 ops->get_parent = NULL;
243 ops->set_parent = NULL;
244 }
245
237 init.parent_names = parent_name; 246 init.parent_names = parent_name;
238 socfpga_clk->hw.hw.init = &init; 247 socfpga_clk->hw.hw.init = &init;
239 248
@@ -246,8 +255,3 @@ static void __init __socfpga_gate_init(struct device_node *node,
246 if (WARN_ON(rc)) 255 if (WARN_ON(rc))
247 return; 256 return;
248} 257}
249
250void __init socfpga_gate_init(struct device_node *node)
251{
252 __socfpga_gate_init(node, &gateclk_ops);
253}
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 35fabe1a32c3..269467e8e07e 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -95,6 +95,7 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
95 95
96 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); 96 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
97 clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0); 97 clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0);
98 of_node_put(clkmgr_np);
98 BUG_ON(!clk_mgr_a10_base_addr); 99 BUG_ON(!clk_mgr_a10_base_addr);
99 pll_clk->hw.reg = clk_mgr_a10_base_addr + reg; 100 pll_clk->hw.reg = clk_mgr_a10_base_addr + reg;
100 101
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 2d5d8b43727e..c4d0b6f6abf2 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
43 /* Read mdiv and fdiv from the fdbck register */ 43 /* Read mdiv and fdiv from the fdbck register */
44 reg = readl(socfpgaclk->hw.reg + 0x4); 44 reg = readl(socfpgaclk->hw.reg + 0x4);
45 mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT; 45 mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
46 vco_freq = (unsigned long long)parent_rate * (mdiv + 6); 46 vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
47 47
48 return (unsigned long)vco_freq; 48 return (unsigned long)vco_freq;
49} 49}
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index c7f463172e4b..b4b44e9b5901 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -100,6 +100,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
100 100
101 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); 101 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
102 clk_mgr_base_addr = of_iomap(clkmgr_np, 0); 102 clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
103 of_node_put(clkmgr_np);
103 BUG_ON(!clk_mgr_base_addr); 104 BUG_ON(!clk_mgr_base_addr);
104 pll_clk->hw.reg = clk_mgr_base_addr + reg; 105 pll_clk->hw.reg = clk_mgr_base_addr + reg;
105 106
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 5b238fc314ac..8281dfbf38c2 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -12,17 +12,17 @@
12 12
13#include "stratix10-clk.h" 13#include "stratix10-clk.h"
14 14
15static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk", 15static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
16 "f2s_free_clk",}; 16 "f2s-free-clk",};
17static const char * const cntr_mux[] = { "main_pll", "periph_pll", 17static const char * const cntr_mux[] = { "main_pll", "periph_pll",
18 "osc1", "cb_intosc_hs_div2_clk", 18 "osc1", "cb-intosc-hs-div2-clk",
19 "f2s_free_clk"}; 19 "f2s-free-clk"};
20static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",}; 20static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
21 21
22static const char * const noc_free_mux[] = {"main_noc_base_clk", 22static const char * const noc_free_mux[] = {"main_noc_base_clk",
23 "peri_noc_base_clk", 23 "peri_noc_base_clk",
24 "osc1", "cb_intosc_hs_div2_clk", 24 "osc1", "cb-intosc-hs-div2-clk",
25 "f2s_free_clk"}; 25 "f2s-free-clk"};
26 26
27static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; 27static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
28static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; 28static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
@@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
33static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; 33static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
34static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; 34static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
35 35
36static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"}; 36static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
37static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; 37static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
38static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; 38static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
39 39
40static const char * const mpu_free_mux[] = {"main_mpu_base_clk", 40static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
41 "peri_mpu_base_clk", 41 "peri_mpu_base_clk",
42 "osc1", "cb_intosc_hs_div2_clk", 42 "osc1", "cb-intosc-hs-div2-clk",
43 "f2s_free_clk"}; 43 "f2s-free-clk"};
44 44
45/* clocks in AO (always on) controller */ 45/* clocks in AO (always on) controller */
46static const struct stratix10_pll_clock s10_pll_clks[] = { 46static const struct stratix10_pll_clock s10_pll_clks[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 3b97f60540ad..609970c0b666 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1",
264static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1", 264static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1",
265 0x060, BIT(10), 0); 265 0x060, BIT(10), 0);
266static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1", 266static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1",
267 0x060, BIT(12), 0); 267 0x060, BIT(11), 0);
268static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1", 268static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1",
269 0x060, BIT(13), 0); 269 0x060, BIT(12), 0);
270static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1", 270static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1",
271 0x060, BIT(13), 0); 271 0x060, BIT(13), 0);
272static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1", 272static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index a4fa2945f230..4b5f8f4e4ab8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -144,7 +144,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
144 8, 4, /* N */ 144 8, 4, /* N */
145 4, 2, /* K */ 145 4, 2, /* K */
146 0, 4, /* M */ 146 0, 4, /* M */
147 BIT(31), /* gate */ 147 BIT(31) | BIT(23) | BIT(22), /* gate */
148 BIT(28), /* lock */ 148 BIT(28), /* lock */
149 CLK_SET_RATE_UNGATE); 149 CLK_SET_RATE_UNGATE);
150 150
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 621b1cd996db..ac12f261f8ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, 542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
543 543
544 [RST_BUS_VE] = { 0x2c4, BIT(0) }, 544 [RST_BUS_VE] = { 0x2c4, BIT(0) },
545 [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, 545 [RST_BUS_TCON0] = { 0x2c4, BIT(4) },
546 [RST_BUS_CSI] = { 0x2c4, BIT(8) }, 546 [RST_BUS_CSI] = { 0x2c4, BIT(8) },
547 [RST_BUS_DE] = { 0x2c4, BIT(12) }, 547 [RST_BUS_DE] = { 0x2c4, BIT(12) },
548 [RST_BUS_DBG] = { 0x2c4, BIT(31) }, 548 [RST_BUS_DBG] = { 0x2c4, BIT(31) },
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 609e363dabf8..7ec752ed3499 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1112,8 +1112,8 @@ static int attr_enable_set(void *data, u64 val)
1112 1112
1113 return val ? dfll_enable(td) : dfll_disable(td); 1113 return val ? dfll_enable(td) : dfll_disable(td);
1114} 1114}
1115DEFINE_SIMPLE_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set, 1115DEFINE_DEBUGFS_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set,
1116 "%llu\n"); 1116 "%llu\n");
1117 1117
1118static int attr_lock_get(void *data, u64 *val) 1118static int attr_lock_get(void *data, u64 *val)
1119{ 1119{
@@ -1129,8 +1129,7 @@ static int attr_lock_set(void *data, u64 val)
1129 1129
1130 return val ? dfll_lock(td) : dfll_unlock(td); 1130 return val ? dfll_lock(td) : dfll_unlock(td);
1131} 1131}
1132DEFINE_SIMPLE_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set, 1132DEFINE_DEBUGFS_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set, "%llu\n");
1133 "%llu\n");
1134 1133
1135static int attr_rate_get(void *data, u64 *val) 1134static int attr_rate_get(void *data, u64 *val)
1136{ 1135{
@@ -1147,7 +1146,7 @@ static int attr_rate_set(void *data, u64 val)
1147 1146
1148 return dfll_request_rate(td, val); 1147 return dfll_request_rate(td, val);
1149} 1148}
1150DEFINE_SIMPLE_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n"); 1149DEFINE_DEBUGFS_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n");
1151 1150
1152static int attr_registers_show(struct seq_file *s, void *data) 1151static int attr_registers_show(struct seq_file *s, void *data)
1153{ 1152{
@@ -1196,10 +1195,11 @@ static void dfll_debug_init(struct tegra_dfll *td)
1196 root = debugfs_create_dir("tegra_dfll_fcpu", NULL); 1195 root = debugfs_create_dir("tegra_dfll_fcpu", NULL);
1197 td->debugfs_dir = root; 1196 td->debugfs_dir = root;
1198 1197
1199 debugfs_create_file("enable", S_IRUGO | S_IWUSR, root, td, &enable_fops); 1198 debugfs_create_file_unsafe("enable", 0644, root, td,
1200 debugfs_create_file("lock", S_IRUGO, root, td, &lock_fops); 1199 &enable_fops);
1201 debugfs_create_file("rate", S_IRUGO, root, td, &rate_fops); 1200 debugfs_create_file_unsafe("lock", 0444, root, td, &lock_fops);
1202 debugfs_create_file("registers", S_IRUGO, root, td, &attr_registers_fops); 1201 debugfs_create_file_unsafe("rate", 0444, root, td, &rate_fops);
1202 debugfs_create_file("registers", 0444, root, td, &attr_registers_fops);
1203} 1203}
1204 1204
1205#else 1205#else
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 269d3595758b..edc31bb56674 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -133,9 +133,11 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
133 struct tegra_dfll_soc_data *soc; 133 struct tegra_dfll_soc_data *soc;
134 134
135 soc = tegra_dfll_unregister(pdev); 135 soc = tegra_dfll_unregister(pdev);
136 if (IS_ERR(soc)) 136 if (IS_ERR(soc)) {
137 dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n", 137 dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
138 PTR_ERR(soc)); 138 PTR_ERR(soc));
139 return PTR_ERR(soc);
140 }
139 141
140 tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq); 142 tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
141 143
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 688e403333b9..0c210984765a 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -614,7 +614,7 @@ static int ti_adpll_init_clkout(struct ti_adpll_data *d,
614 614
615 init.name = child_name; 615 init.name = child_name;
616 init.ops = ops; 616 init.ops = ops;
617 init.flags = CLK_IS_BASIC; 617 init.flags = 0;
618 co->hw.init = &init; 618 co->hw.init = &init;
619 parent_names[0] = __clk_get_name(clk0); 619 parent_names[0] = __clk_get_name(clk0);
620 parent_names[1] = __clk_get_name(clk1); 620 parent_names[1] = __clk_get_name(clk1);
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 222f68bc3f2a..015a657d3382 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -165,7 +165,7 @@ static void __init omap_clk_register_apll(void *user,
165 165
166 ad->clk_bypass = __clk_get_hw(clk); 166 ad->clk_bypass = __clk_get_hw(clk);
167 167
168 clk = ti_clk_register(NULL, &clk_hw->hw, node->name); 168 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
169 if (!IS_ERR(clk)) { 169 if (!IS_ERR(clk)) {
170 of_clk_add_provider(node, of_clk_src_simple_get, clk); 170 of_clk_add_provider(node, of_clk_src_simple_get, clk);
171 kfree(clk_hw->hw.init->parent_names); 171 kfree(clk_hw->hw.init->parent_names);
@@ -402,7 +402,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
402 if (ret) 402 if (ret)
403 goto cleanup; 403 goto cleanup;
404 404
405 clk = clk_register(NULL, &clk_hw->hw); 405 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
406 if (!IS_ERR(clk)) { 406 if (!IS_ERR(clk)) {
407 of_clk_add_provider(node, of_clk_src_simple_get, clk); 407 of_clk_add_provider(node, of_clk_src_simple_get, clk);
408 kfree(init); 408 kfree(init);
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index 7bb9afbe4058..1cae226759dd 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -35,7 +35,44 @@ struct clk_ti_autoidle {
35#define AUTOIDLE_LOW 0x1 35#define AUTOIDLE_LOW 0x1
36 36
37static LIST_HEAD(autoidle_clks); 37static LIST_HEAD(autoidle_clks);
38static LIST_HEAD(clk_hw_omap_clocks); 38
39/*
40 * we have some non-atomic read/write
41 * operations behind it, so lets
42 * take one lock for handling autoidle
43 * of all clocks
44 */
45static DEFINE_SPINLOCK(autoidle_spinlock);
46
47static int _omap2_clk_deny_idle(struct clk_hw_omap *clk)
48{
49 if (clk->ops && clk->ops->deny_idle) {
50 unsigned long irqflags;
51
52 spin_lock_irqsave(&autoidle_spinlock, irqflags);
53 clk->autoidle_count++;
54 if (clk->autoidle_count == 1)
55 clk->ops->deny_idle(clk);
56
57 spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
58 }
59 return 0;
60}
61
62static int _omap2_clk_allow_idle(struct clk_hw_omap *clk)
63{
64 if (clk->ops && clk->ops->allow_idle) {
65 unsigned long irqflags;
66
67 spin_lock_irqsave(&autoidle_spinlock, irqflags);
68 clk->autoidle_count--;
69 if (clk->autoidle_count == 0)
70 clk->ops->allow_idle(clk);
71
72 spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
73 }
74 return 0;
75}
39 76
40/** 77/**
41 * omap2_clk_deny_idle - disable autoidle on an OMAP clock 78 * omap2_clk_deny_idle - disable autoidle on an OMAP clock
@@ -45,12 +82,15 @@ static LIST_HEAD(clk_hw_omap_clocks);
45 */ 82 */
46int omap2_clk_deny_idle(struct clk *clk) 83int omap2_clk_deny_idle(struct clk *clk)
47{ 84{
48 struct clk_hw_omap *c; 85 struct clk_hw *hw = __clk_get_hw(clk);
49 86
50 c = to_clk_hw_omap(__clk_get_hw(clk)); 87 if (omap2_clk_is_hw_omap(hw)) {
51 if (c->ops && c->ops->deny_idle) 88 struct clk_hw_omap *c = to_clk_hw_omap(hw);
52 c->ops->deny_idle(c); 89
53 return 0; 90 return _omap2_clk_deny_idle(c);
91 }
92
93 return -EINVAL;
54} 94}
55 95
56/** 96/**
@@ -61,12 +101,15 @@ int omap2_clk_deny_idle(struct clk *clk)
61 */ 101 */
62int omap2_clk_allow_idle(struct clk *clk) 102int omap2_clk_allow_idle(struct clk *clk)
63{ 103{
64 struct clk_hw_omap *c; 104 struct clk_hw *hw = __clk_get_hw(clk);
65 105
66 c = to_clk_hw_omap(__clk_get_hw(clk)); 106 if (omap2_clk_is_hw_omap(hw)) {
67 if (c->ops && c->ops->allow_idle) 107 struct clk_hw_omap *c = to_clk_hw_omap(hw);
68 c->ops->allow_idle(c); 108
69 return 0; 109 return _omap2_clk_allow_idle(c);
110 }
111
112 return -EINVAL;
70} 113}
71 114
72static void _allow_autoidle(struct clk_ti_autoidle *clk) 115static void _allow_autoidle(struct clk_ti_autoidle *clk)
@@ -168,26 +211,6 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node)
168} 211}
169 212
170/** 213/**
171 * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
172 * @hw: struct clk_hw * to initialize
173 *
174 * Add an OMAP clock @clk to the internal list of OMAP clocks. Used
175 * temporarily for autoidle handling, until this support can be
176 * integrated into the common clock framework code in some way. No
177 * return value.
178 */
179void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
180{
181 struct clk_hw_omap *c;
182
183 if (clk_hw_get_flags(hw) & CLK_IS_BASIC)
184 return;
185
186 c = to_clk_hw_omap(hw);
187 list_add(&c->node, &clk_hw_omap_clocks);
188}
189
190/**
191 * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that 214 * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
192 * support it 215 * support it
193 * 216 *
@@ -198,11 +221,11 @@ void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
198 */ 221 */
199int omap2_clk_enable_autoidle_all(void) 222int omap2_clk_enable_autoidle_all(void)
200{ 223{
201 struct clk_hw_omap *c; 224 int ret;
202 225
203 list_for_each_entry(c, &clk_hw_omap_clocks, node) 226 ret = omap2_clk_for_each(_omap2_clk_allow_idle);
204 if (c->ops && c->ops->allow_idle) 227 if (ret)
205 c->ops->allow_idle(c); 228 return ret;
206 229
207 _clk_generic_allow_autoidle_all(); 230 _clk_generic_allow_autoidle_all();
208 231
@@ -220,11 +243,11 @@ int omap2_clk_enable_autoidle_all(void)
220 */ 243 */
221int omap2_clk_disable_autoidle_all(void) 244int omap2_clk_disable_autoidle_all(void)
222{ 245{
223 struct clk_hw_omap *c; 246 int ret;
224 247
225 list_for_each_entry(c, &clk_hw_omap_clocks, node) 248 ret = omap2_clk_for_each(_omap2_clk_deny_idle);
226 if (c->ops && c->ops->deny_idle) 249 if (ret)
227 c->ops->deny_idle(c); 250 return ret;
228 251
229 _clk_generic_deny_autoidle_all(); 252 _clk_generic_deny_autoidle_all();
230 253
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index d0cd58534781..ff164a33f67d 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -31,6 +31,7 @@
31#undef pr_fmt 31#undef pr_fmt
32#define pr_fmt(fmt) "%s: " fmt, __func__ 32#define pr_fmt(fmt) "%s: " fmt, __func__
33 33
34static LIST_HEAD(clk_hw_omap_clocks);
34struct ti_clk_ll_ops *ti_clk_ll_ops; 35struct ti_clk_ll_ops *ti_clk_ll_ops;
35static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS]; 36static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];
36 37
@@ -191,9 +192,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
191 clkdev_add(&c->lk); 192 clkdev_add(&c->lk);
192 } else { 193 } else {
193 if (num_args && !has_clkctrl_data) { 194 if (num_args && !has_clkctrl_data) {
194 if (of_find_compatible_node(NULL, NULL, 195 struct device_node *np;
195 "ti,clkctrl")) { 196
197 np = of_find_compatible_node(NULL, NULL,
198 "ti,clkctrl");
199 if (np) {
196 has_clkctrl_data = true; 200 has_clkctrl_data = true;
201 of_node_put(np);
197 } else { 202 } else {
198 clkctrl_nodes_missing = true; 203 clkctrl_nodes_missing = true;
199 204
@@ -517,3 +522,74 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
517 522
518 return clk; 523 return clk;
519} 524}
525
526/**
527 * ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
528 * @dev: device for this clock
529 * @hw: hardware clock handle
530 * @con: connection ID for this clock
531 *
532 * Registers a clk_hw_omap clock to the clock framewor, adds a clock alias
533 * for it, and adds the list to the available clk_hw_omap type clocks.
534 * Returns a handle to the registered clock if successful, ERR_PTR value
535 * in failure.
536 */
537struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
538 const char *con)
539{
540 struct clk *clk;
541 struct clk_hw_omap *oclk;
542
543 clk = ti_clk_register(dev, hw, con);
544 if (IS_ERR(clk))
545 return clk;
546
547 oclk = to_clk_hw_omap(hw);
548
549 list_add(&oclk->node, &clk_hw_omap_clocks);
550
551 return clk;
552}
553
554/**
555 * omap2_clk_for_each - call function for each registered clk_hw_omap
556 * @fn: pointer to a callback function
557 *
558 * Call @fn for each registered clk_hw_omap, passing @hw to each
559 * function. @fn must return 0 for success or any other value for
560 * failure. If @fn returns non-zero, the iteration across clocks
561 * will stop and the non-zero return value will be passed to the
562 * caller of omap2_clk_for_each().
563 */
564int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw))
565{
566 int ret;
567 struct clk_hw_omap *hw;
568
569 list_for_each_entry(hw, &clk_hw_omap_clocks, node) {
570 ret = (*fn)(hw);
571 if (ret)
572 break;
573 }
574
575 return ret;
576}
577
578/**
579 * omap2_clk_is_hw_omap - check if the provided clk_hw is OMAP clock
580 * @hw: clk_hw to check if it is an omap clock or not
581 *
582 * Checks if the provided clk_hw is OMAP clock or not. Returns true if
583 * it is, false otherwise.
584 */
585bool omap2_clk_is_hw_omap(struct clk_hw *hw)
586{
587 struct clk_hw_omap *oclk;
588
589 list_for_each_entry(oclk, &clk_hw_omap_clocks, node) {
590 if (&oclk->hw == hw)
591 return true;
592 }
593
594 return false;
595}
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 40630eb950fc..bf32d996177f 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -276,7 +276,7 @@ _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
276 init.parent_names = parents; 276 init.parent_names = parents;
277 init.num_parents = num_parents; 277 init.num_parents = num_parents;
278 init.ops = ops; 278 init.ops = ops;
279 init.flags = CLK_IS_BASIC; 279 init.flags = 0;
280 280
281 clk = ti_clk_register(NULL, clk_hw, init.name); 281 clk = ti_clk_register(NULL, clk_hw, init.name);
282 if (IS_ERR_OR_NULL(clk)) { 282 if (IS_ERR_OR_NULL(clk)) {
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 9f312a219510..1c0fac59d809 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -203,6 +203,8 @@ typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
203 203
204struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw, 204struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
205 const char *con); 205 const char *con);
206struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
207 const char *con);
206int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con); 208int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
207void ti_clk_add_aliases(void); 209void ti_clk_add_aliases(void);
208 210
@@ -221,7 +223,6 @@ int ti_clk_retry_init(struct device_node *node, void *user,
221 ti_of_clk_init_cb_t func); 223 ti_of_clk_init_cb_t func);
222int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type); 224int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
223 225
224void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw);
225int of_ti_clk_autoidle_setup(struct device_node *node); 226int of_ti_clk_autoidle_setup(struct device_node *node);
226void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); 227void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
227 228
@@ -301,6 +302,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
301 unsigned long *parent_rate); 302 unsigned long *parent_rate);
302int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, 303int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
303 struct clk_rate_request *req); 304 struct clk_rate_request *req);
305int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
306bool omap2_clk_is_hw_omap(struct clk_hw *hw);
304 307
305extern struct ti_clk_ll_ops *ti_clk_ll_ops; 308extern struct ti_clk_ll_ops *ti_clk_ll_ops;
306 309
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 07a805125e98..423a99b9f10c 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -143,7 +143,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
143 continue; 143 continue;
144 } 144 }
145 clk_hw = __clk_get_hw(clk); 145 clk_hw = __clk_get_hw(clk);
146 if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) { 146 if (!omap2_clk_is_hw_omap(clk_hw)) {
147 pr_warn("can't setup clkdm for basic clk %s\n", 147 pr_warn("can't setup clkdm for basic clk %s\n",
148 __clk_get_name(clk)); 148 __clk_get_name(clk));
149 continue; 149 continue;
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 8d77090ad94a..4786e0ebc2e8 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -336,7 +336,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
336 336
337 init.name = name; 337 init.name = name;
338 init.ops = &ti_clk_divider_ops; 338 init.ops = &ti_clk_divider_ops;
339 init.flags = flags | CLK_IS_BASIC; 339 init.flags = flags;
340 init.parent_names = (parent_name ? &parent_name : NULL); 340 init.parent_names = (parent_name ? &parent_name : NULL);
341 init.num_parents = (parent_name ? 1 : 0); 341 init.num_parents = (parent_name ? 1 : 0);
342 342
@@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
403 num_dividers = i; 403 num_dividers = i;
404 404
405 tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); 405 tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
406 if (!tmp) 406 if (!tmp) {
407 *table = ERR_PTR(-ENOMEM);
407 return -ENOMEM; 408 return -ENOMEM;
409 }
408 410
409 valid_div = 0; 411 valid_div = 0;
410 *width = 0; 412 *width = 0;
@@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
439{ 441{
440 struct clk_omap_divider *div; 442 struct clk_omap_divider *div;
441 struct clk_omap_reg *reg; 443 struct clk_omap_reg *reg;
444 int ret;
442 445
443 if (!setup) 446 if (!setup)
444 return NULL; 447 return NULL;
@@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
458 div->flags |= CLK_DIVIDER_POWER_OF_TWO; 461 div->flags |= CLK_DIVIDER_POWER_OF_TWO;
459 462
460 div->table = _get_div_table_from_setup(setup, &div->width); 463 div->table = _get_div_table_from_setup(setup, &div->width);
464 if (IS_ERR(div->table)) {
465 ret = PTR_ERR(div->table);
466 kfree(div);
467 return ERR_PTR(ret);
468 }
469
461 470
462 div->shift = setup->bit_shift; 471 div->shift = setup->bit_shift;
463 div->latch = -EINVAL; 472 div->latch = -EINVAL;
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 6c3329bc116f..659dadb23279 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -192,10 +192,9 @@ static void __init _register_dpll(void *user,
192 dd->clk_bypass = __clk_get_hw(clk); 192 dd->clk_bypass = __clk_get_hw(clk);
193 193
194 /* register the clock */ 194 /* register the clock */
195 clk = ti_clk_register(NULL, &clk_hw->hw, node->name); 195 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
196 196
197 if (!IS_ERR(clk)) { 197 if (!IS_ERR(clk)) {
198 omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
199 of_clk_add_provider(node, of_clk_src_simple_get, clk); 198 of_clk_add_provider(node, of_clk_src_simple_get, clk);
200 kfree(clk_hw->hw.init->parent_names); 199 kfree(clk_hw->hw.init->parent_names);
201 kfree(clk_hw->hw.init); 200 kfree(clk_hw->hw.init);
@@ -265,14 +264,12 @@ static void _register_dpll_x2(struct device_node *node,
265#endif 264#endif
266 265
267 /* register the clock */ 266 /* register the clock */
268 clk = ti_clk_register(NULL, &clk_hw->hw, name); 267 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
269 268
270 if (IS_ERR(clk)) { 269 if (IS_ERR(clk))
271 kfree(clk_hw); 270 kfree(clk_hw);
272 } else { 271 else
273 omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
274 of_clk_add_provider(node, of_clk_src_simple_get, clk); 272 of_clk_add_provider(node, of_clk_src_simple_get, clk);
275 }
276} 273}
277#endif 274#endif
278 275
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 44b6b6403753..3dde6c8c3354 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -731,7 +731,7 @@ static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
731 do { 731 do {
732 do { 732 do {
733 hw = clk_hw_get_parent(hw); 733 hw = clk_hw_get_parent(hw);
734 } while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC)); 734 } while (hw && (!omap2_clk_is_hw_omap(hw)));
735 if (!hw) 735 if (!hw)
736 break; 736 break;
737 pclk = to_clk_hw_omap(hw); 737 pclk = to_clk_hw_omap(hw);
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 1c78fff5513c..504c0e91cdc7 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -123,7 +123,7 @@ static struct clk *_register_gate(struct device *dev, const char *name,
123 123
124 init.flags = flags; 124 init.flags = flags;
125 125
126 clk = ti_clk_register(NULL, &clk_hw->hw, name); 126 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
127 127
128 if (IS_ERR(clk)) 128 if (IS_ERR(clk))
129 kfree(clk_hw); 129 kfree(clk_hw);
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 87e00c2ee957..83e34429d3b1 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -57,12 +57,10 @@ static struct clk *_register_interface(struct device *dev, const char *name,
57 init.num_parents = 1; 57 init.num_parents = 1;
58 init.parent_names = &parent_name; 58 init.parent_names = &parent_name;
59 59
60 clk = ti_clk_register(NULL, &clk_hw->hw, name); 60 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
61 61
62 if (IS_ERR(clk)) 62 if (IS_ERR(clk))
63 kfree(clk_hw); 63 kfree(clk_hw);
64 else
65 omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
66 64
67 return clk; 65 return clk;
68} 66}
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 883bdde94d04..b7f9a4f068bf 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -143,7 +143,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
143 143
144 init.name = name; 144 init.name = name;
145 init.ops = &ti_clk_mux_ops; 145 init.ops = &ti_clk_mux_ops;
146 init.flags = flags | CLK_IS_BASIC; 146 init.flags = flags;
147 init.parent_names = parent_names; 147 init.parent_names = parent_names;
148 init.num_parents = num_parents; 148 init.num_parents = num_parents;
149 149
diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
index ec11f55594ad..5d2d42b7e182 100644
--- a/drivers/clk/uniphier/clk-uniphier-cpugear.c
+++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
@@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
47 return ret; 47 return ret;
48 48
49 ret = regmap_write_bits(gear->regmap, 49 ret = regmap_write_bits(gear->regmap,
50 gear->regbase + UNIPHIER_CLK_CPUGEAR_SET, 50 gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
51 UNIPHIER_CLK_CPUGEAR_UPD_BIT, 51 UNIPHIER_CLK_CPUGEAR_UPD_BIT,
52 UNIPHIER_CLK_CPUGEAR_UPD_BIT); 52 UNIPHIER_CLK_CPUGEAR_UPD_BIT);
53 if (ret) 53 if (ret)
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
index 3a0996f2d556..25d4b97aff9b 100644
--- a/drivers/clk/x86/clk-st.c
+++ b/drivers/clk/x86/clk-st.c
@@ -52,7 +52,8 @@ static int st_clk_probe(struct platform_device *pdev)
52 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, 52 0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
53 CLK_GATE_SET_TO_DISABLE, NULL); 53 CLK_GATE_SET_TO_DISABLE, NULL);
54 54
55 clk_hw_register_clkdev(hws[ST_CLK_GATE], "oscout1", NULL); 55 devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE], "oscout1",
56 NULL);
56 57
57 return 0; 58 return 0;
58} 59}
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index f65cc0ff76ab..b0908ec62f73 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np)
669 if (ret) 669 if (ret)
670 return ret; 670 return ret;
671 671
672 zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) * 672 zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx),
673 clock_max_idx, GFP_KERNEL); 673 GFP_KERNEL);
674 if (!zynqmp_data) 674 if (!zynqmp_data)
675 return -ENOMEM; 675 return -ENOMEM;
676 676
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 595124074821..c364027638e1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
154 if (IS_ERR(parent)) 154 if (IS_ERR(parent))
155 return -ENODEV; 155 return -ENODEV;
156 156
157 /* Bail out if both clocks point to fck */
158 if (clk_is_match(parent, timer->fclk))
159 return 0;
160
157 ret = clk_set_parent(timer->fclk, parent); 161 ret = clk_set_parent(timer->fclk, parent);
158 if (ret < 0) 162 if (ret < 0)
159 pr_err("%s: failed to set parent\n", __func__); 163 pr_err("%s: failed to set parent\n", __func__);
@@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
864 timer->pdev = pdev; 868 timer->pdev = pdev;
865 869
866 pm_runtime_enable(dev); 870 pm_runtime_enable(dev);
867 pm_runtime_irq_safe(dev);
868 871
869 if (!timer->reserved) { 872 if (!timer->reserved) {
870 ret = pm_runtime_get_sync(dev); 873 ret = pm_runtime_get_sync(dev);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6f23ebb395f1..e35a886e00bc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1530,17 +1530,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1530{ 1530{
1531 unsigned int ret_freq = 0; 1531 unsigned int ret_freq = 0;
1532 1532
1533 if (!cpufreq_driver->get) 1533 if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
1534 return ret_freq; 1534 return ret_freq;
1535 1535
1536 ret_freq = cpufreq_driver->get(policy->cpu); 1536 ret_freq = cpufreq_driver->get(policy->cpu);
1537 1537
1538 /* 1538 /*
1539 * Updating inactive policies is invalid, so avoid doing that. Also 1539 * If fast frequency switching is used with the given policy, the check
1540 * if fast frequency switching is used with the given policy, the check
1541 * against policy->cur is pointless, so skip it in that case too. 1540 * against policy->cur is pointless, so skip it in that case too.
1542 */ 1541 */
1543 if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) 1542 if (policy->fast_switch_enabled)
1544 return ret_freq; 1543 return ret_freq;
1545 1544
1546 if (ret_freq && policy->cur && 1545 if (ret_freq && policy->cur &&
@@ -1569,10 +1568,7 @@ unsigned int cpufreq_get(unsigned int cpu)
1569 1568
1570 if (policy) { 1569 if (policy) {
1571 down_read(&policy->rwsem); 1570 down_read(&policy->rwsem);
1572 1571 ret_freq = __cpufreq_get(policy);
1573 if (!policy_is_inactive(policy))
1574 ret_freq = __cpufreq_get(policy);
1575
1576 up_read(&policy->rwsem); 1572 up_read(&policy->rwsem);
1577 1573
1578 cpufreq_cpu_put(policy); 1574 cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 50b1551ba894..9ed46d188cb5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
52 int ret; 52 int ret;
53 struct scmi_data *priv = policy->driver_data; 53 struct scmi_data *priv = policy->driver_data;
54 struct scmi_perf_ops *perf_ops = handle->perf_ops; 54 struct scmi_perf_ops *perf_ops = handle->perf_ops;
55 u64 freq = policy->freq_table[index].frequency * 1000; 55 u64 freq = policy->freq_table[index].frequency;
56 56
57 ret = perf_ops->freq_set(handle, priv->domain_id, freq, false); 57 ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
58 if (!ret) 58 if (!ret)
59 arch_set_freq_scale(policy->related_cpus, freq, 59 arch_set_freq_scale(policy->related_cpus, freq,
60 policy->cpuinfo.max_freq); 60 policy->cpuinfo.max_freq);
@@ -176,7 +176,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
176out_free_priv: 176out_free_priv:
177 kfree(priv); 177 kfree(priv);
178out_free_opp: 178out_free_opp:
179 dev_pm_opp_cpumask_remove_table(policy->cpus); 179 dev_pm_opp_remove_all_dynamic(cpu_dev);
180 180
181 return ret; 181 return ret;
182} 182}
@@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
187 187
188 cpufreq_cooling_unregister(priv->cdev); 188 cpufreq_cooling_unregister(priv->cdev);
189 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 189 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
190 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
190 kfree(priv); 191 kfree(priv);
191 dev_pm_opp_cpumask_remove_table(policy->related_cpus);
192 192
193 return 0; 193 return 0;
194} 194}
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 87a98ec77773..99449738faa4 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -177,7 +177,7 @@ out_free_cpufreq_table:
177out_free_priv: 177out_free_priv:
178 kfree(priv); 178 kfree(priv);
179out_free_opp: 179out_free_opp:
180 dev_pm_opp_cpumask_remove_table(policy->cpus); 180 dev_pm_opp_remove_all_dynamic(cpu_dev);
181 181
182 return ret; 182 return ret;
183} 183}
@@ -190,7 +190,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
190 clk_put(priv->clk); 190 clk_put(priv->clk);
191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
192 kfree(priv); 192 kfree(priv);
193 dev_pm_opp_cpumask_remove_table(policy->related_cpus); 193 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
194 194
195 return 0; 195 return 0;
196} 196}
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index b17d153e724f..23a1b27579a5 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
21 local_irq_enable(); 21 local_irq_enable();
22 if (!current_set_polling_and_test()) { 22 if (!current_set_polling_and_test()) {
23 unsigned int loop_count = 0; 23 unsigned int loop_count = 0;
24 u64 limit = TICK_USEC; 24 u64 limit = TICK_NSEC;
25 int i; 25 int i;
26 26
27 for (i = 1; i < drv->state_count; i++) { 27 for (i = 1; i < drv->state_count; i++) {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5a90075f719d..0be55fcc19ba 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
692 depends on ARCH_BCM_IPROC 692 depends on ARCH_BCM_IPROC
693 depends on MAILBOX 693 depends on MAILBOX
694 default m 694 default m
695 select CRYPTO_AUTHENC
695 select CRYPTO_DES 696 select CRYPTO_DES
696 select CRYPTO_MD5 697 select CRYPTO_MD5
697 select CRYPTO_SHA1 698 select CRYPTO_SHA1
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 63cb6956c948..acf79889d903 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
283 */ 283 */
284static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) 284static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
285{ 285{
286 dev->gdr = dma_zalloc_coherent(dev->core_dev->device, 286 dev->gdr = dma_alloc_coherent(dev->core_dev->device,
287 sizeof(struct ce_gd) * PPC4XX_NUM_GD, 287 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
288 &dev->gdr_pa, GFP_ATOMIC); 288 &dev->gdr_pa, GFP_ATOMIC);
289 if (!dev->gdr) 289 if (!dev->gdr)
290 return -ENOMEM; 290 return -ENOMEM;
291 291
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c9393ffb70ed..5567cbda2798 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2845 struct spu_hw *spu = &iproc_priv.spu; 2845 struct spu_hw *spu = &iproc_priv.spu;
2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2848 struct rtattr *rta = (void *)key; 2848 struct crypto_authenc_keys keys;
2849 struct crypto_authenc_key_param *param; 2849 int ret;
2850 const u8 *origkey = key;
2851 const unsigned int origkeylen = keylen;
2852
2853 int ret = 0;
2854 2850
2855 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, 2851 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2856 keylen); 2852 keylen);
2857 flow_dump(" key: ", key, keylen); 2853 flow_dump(" key: ", key, keylen);
2858 2854
2859 if (!RTA_OK(rta, keylen)) 2855 ret = crypto_authenc_extractkeys(&keys, key, keylen);
2860 goto badkey; 2856 if (ret)
2861 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
2862 goto badkey;
2863 if (RTA_PAYLOAD(rta) < sizeof(*param))
2864 goto badkey; 2857 goto badkey;
2865 2858
2866 param = RTA_DATA(rta); 2859 if (keys.enckeylen > MAX_KEY_SIZE ||
2867 ctx->enckeylen = be32_to_cpu(param->enckeylen); 2860 keys.authkeylen > MAX_KEY_SIZE)
2868
2869 key += RTA_ALIGN(rta->rta_len);
2870 keylen -= RTA_ALIGN(rta->rta_len);
2871
2872 if (keylen < ctx->enckeylen)
2873 goto badkey;
2874 if (ctx->enckeylen > MAX_KEY_SIZE)
2875 goto badkey; 2861 goto badkey;
2876 2862
2877 ctx->authkeylen = keylen - ctx->enckeylen; 2863 ctx->enckeylen = keys.enckeylen;
2878 2864 ctx->authkeylen = keys.authkeylen;
2879 if (ctx->authkeylen > MAX_KEY_SIZE)
2880 goto badkey;
2881 2865
2882 memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); 2866 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2883 /* May end up padding auth key. So make sure it's zeroed. */ 2867 /* May end up padding auth key. So make sure it's zeroed. */
2884 memset(ctx->authkey, 0, sizeof(ctx->authkey)); 2868 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2885 memcpy(ctx->authkey, key, ctx->authkeylen); 2869 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2886 2870
2887 switch (ctx->alg->cipher_info.alg) { 2871 switch (ctx->alg->cipher_info.alg) {
2888 case CIPHER_ALG_DES: 2872 case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2890 u32 tmp[DES_EXPKEY_WORDS]; 2874 u32 tmp[DES_EXPKEY_WORDS];
2891 u32 flags = CRYPTO_TFM_RES_WEAK_KEY; 2875 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2892 2876
2893 if (des_ekey(tmp, key) == 0) { 2877 if (des_ekey(tmp, keys.enckey) == 0) {
2894 if (crypto_aead_get_flags(cipher) & 2878 if (crypto_aead_get_flags(cipher) &
2895 CRYPTO_TFM_REQ_WEAK_KEY) { 2879 CRYPTO_TFM_REQ_WEAK_KEY) {
2896 crypto_aead_set_flags(cipher, flags); 2880 crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2905 break; 2889 break;
2906 case CIPHER_ALG_3DES: 2890 case CIPHER_ALG_3DES:
2907 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { 2891 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2908 const u32 *K = (const u32 *)key; 2892 const u32 *K = (const u32 *)keys.enckey;
2909 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 2893 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
2910 2894
2911 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 2895 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2956 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 2940 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2957 ctx->fallback_cipher->base.crt_flags |= 2941 ctx->fallback_cipher->base.crt_flags |=
2958 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 2942 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2959 ret = 2943 ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2960 crypto_aead_setkey(ctx->fallback_cipher, origkey,
2961 origkeylen);
2962 if (ret) { 2944 if (ret) {
2963 flow_log(" fallback setkey() returned:%d\n", ret); 2945 flow_log(" fallback setkey() returned:%d\n", ret);
2964 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 2946 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 92e593e2069a..80ae69f906fb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void)
3476 * Skip algorithms requiring message digests 3476 * Skip algorithms requiring message digests
3477 * if MD or MD size is not supported by device. 3477 * if MD or MD size is not supported by device.
3478 */ 3478 */
3479 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && 3479 if (is_mdha(c2_alg_sel) &&
3480 (!md_inst || t_alg->aead.maxauthsize > md_limit)) 3480 (!md_inst || t_alg->aead.maxauthsize > md_limit))
3481 continue; 3481 continue;
3482 3482
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 81712aa5d0f2..bb1a2cdf1951 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
1072 1072
1073 desc = edesc->hw_desc; 1073 desc = edesc->hw_desc;
1074 1074
1075 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1075 if (buflen) {
1076 if (dma_mapping_error(jrdev, state->buf_dma)) { 1076 state->buf_dma = dma_map_single(jrdev, buf, buflen,
1077 dev_err(jrdev, "unable to map src\n"); 1077 DMA_TO_DEVICE);
1078 goto unmap; 1078 if (dma_mapping_error(jrdev, state->buf_dma)) {
1079 } 1079 dev_err(jrdev, "unable to map src\n");
1080 goto unmap;
1081 }
1080 1082
1081 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1083 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1084 }
1082 1085
1083 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1086 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1084 digestsize); 1087 digestsize);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index ec10230178c5..4b6854bf896a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,6 +1155,7 @@
1155#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) 1155#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
1156#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) 1156#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
1157#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) 1157#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
1158#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT)
1158#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) 1159#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
1159#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) 1160#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
1160#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) 1161#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 67ea94079837..8c6b83e02a70 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,6 +7,9 @@
7 7
8#ifndef CAAM_ERROR_H 8#ifndef CAAM_ERROR_H
9#define CAAM_ERROR_H 9#define CAAM_ERROR_H
10
11#include "desc.h"
12
10#define CAAM_ERROR_STR_MAX 302 13#define CAAM_ERROR_STR_MAX 302
11 14
12void caam_strstatus(struct device *dev, u32 status, bool qi_v2); 15void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
@@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
17void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 20void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
18 int rowsize, int groupsize, struct scatterlist *sg, 21 int rowsize, int groupsize, struct scatterlist *sg,
19 size_t tlen, bool ascii); 22 size_t tlen, bool ascii);
23
24static inline bool is_mdha(u32 algtype)
25{
26 return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
27 OP_ALG_CHA_MDHA;
28}
20#endif /* CAAM_ERROR_H */ 29#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 06ad85ab5e86..a876535529d1 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
278 mcode->num_cores = is_ae ? 6 : 10; 278 mcode->num_cores = is_ae ? 6 : 10;
279 279
280 /* Allocate DMAable space */ 280 /* Allocate DMAable space */
281 mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size, 281 mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
282 &mcode->phys_base, GFP_KERNEL); 282 &mcode->phys_base, GFP_KERNEL);
283 if (!mcode->code) { 283 if (!mcode->code) {
284 dev_err(dev, "Unable to allocate space for microcode"); 284 dev_err(dev, "Unable to allocate space for microcode");
285 ret = -ENOMEM; 285 ret = -ENOMEM;
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 5c796ed55eba..2ca431ed1db8 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,
236 236
237 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes : 237 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
238 rem_q_size; 238 rem_q_size;
239 curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev, 239 curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
240 c_size + CPT_NEXT_CHUNK_PTR_SIZE, 240 c_size + CPT_NEXT_CHUNK_PTR_SIZE,
241 &curr->dma_addr, GFP_KERNEL); 241 &curr->dma_addr,
242 GFP_KERNEL);
242 if (!curr->head) { 243 if (!curr->head) {
243 dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", 244 dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
244 i, queue->nchunks); 245 i, queue->nchunks);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 9138bae12521..4ace9bcd603a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
25 struct nitrox_device *ndev = cmdq->ndev; 25 struct nitrox_device *ndev = cmdq->ndev;
26 26
27 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; 27 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
28 cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize, 28 cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
29 &cmdq->unalign_dma, 29 &cmdq->unalign_dma,
30 GFP_KERNEL); 30 GFP_KERNEL);
31 if (!cmdq->unalign_base) 31 if (!cmdq->unalign_base)
32 return -ENOMEM; 32 return -ENOMEM;
33 33
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index e34e4df8fd24..4c97478d44bd 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
537 struct nitrox_device *ndev = cmdq->ndev; 537 struct nitrox_device *ndev = cmdq->ndev;
538 struct nitrox_softreq *sr; 538 struct nitrox_softreq *sr;
539 int req_completed = 0, err = 0, budget; 539 int req_completed = 0, err = 0, budget;
540 completion_t callback;
541 void *cb_arg;
540 542
541 /* check all pending requests */ 543 /* check all pending requests */
542 budget = atomic_read(&cmdq->pending_count); 544 budget = atomic_read(&cmdq->pending_count);
@@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
564 smp_mb__after_atomic(); 566 smp_mb__after_atomic();
565 /* remove from response list */ 567 /* remove from response list */
566 response_list_del(sr, cmdq); 568 response_list_del(sr, cmdq);
567
568 /* ORH error code */ 569 /* ORH error code */
569 err = READ_ONCE(*sr->resp.orh) & 0xff; 570 err = READ_ONCE(*sr->resp.orh) & 0xff;
571 callback = sr->callback;
572 cb_arg = sr->cb_arg;
570 softreq_destroy(sr); 573 softreq_destroy(sr);
571 574 if (callback)
572 if (sr->callback) 575 callback(cb_arg, err);
573 sr->callback(sr->cb_arg, err);
574 576
575 req_completed++; 577 req_completed++;
576 } 578 }
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 44a4d2779b15..c9bfd4f439ce 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp)
822 /* Page alignment satisfies our needs for N <= 128 */ 822 /* Page alignment satisfies our needs for N <= 128 */
823 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); 823 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
824 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); 824 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
825 cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, 825 cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
826 &cmd_q->qbase_dma, 826 &cmd_q->qbase_dma,
827 GFP_KERNEL); 827 GFP_KERNEL);
828 if (!cmd_q->qbase) { 828 if (!cmd_q->qbase) {
829 dev_err(dev, "unable to allocate command queue\n"); 829 dev_err(dev, "unable to allocate command queue\n");
830 ret = -ENOMEM; 830 ret = -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index f2643cda45db..a3527c00b29a 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
549 unsigned int keylen) 549 unsigned int keylen)
550{ 550{
551 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 551 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
552 struct rtattr *rta = (struct rtattr *)key;
553 struct cc_crypto_req cc_req = {}; 552 struct cc_crypto_req cc_req = {};
554 struct crypto_authenc_key_param *param;
555 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; 553 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
556 int rc = -EINVAL;
557 unsigned int seq_len = 0; 554 unsigned int seq_len = 0;
558 struct device *dev = drvdata_to_dev(ctx->drvdata); 555 struct device *dev = drvdata_to_dev(ctx->drvdata);
556 const u8 *enckey, *authkey;
557 int rc;
559 558
560 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", 559 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
561 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); 560 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
563 /* STAT_PHASE_0: Init and sanity checks */ 562 /* STAT_PHASE_0: Init and sanity checks */
564 563
565 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ 564 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
566 if (!RTA_OK(rta, keylen)) 565 struct crypto_authenc_keys keys;
567 goto badkey; 566
568 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 567 rc = crypto_authenc_extractkeys(&keys, key, keylen);
569 goto badkey; 568 if (rc)
570 if (RTA_PAYLOAD(rta) < sizeof(*param))
571 goto badkey;
572 param = RTA_DATA(rta);
573 ctx->enc_keylen = be32_to_cpu(param->enckeylen);
574 key += RTA_ALIGN(rta->rta_len);
575 keylen -= RTA_ALIGN(rta->rta_len);
576 if (keylen < ctx->enc_keylen)
577 goto badkey; 569 goto badkey;
578 ctx->auth_keylen = keylen - ctx->enc_keylen; 570 enckey = keys.enckey;
571 authkey = keys.authkey;
572 ctx->enc_keylen = keys.enckeylen;
573 ctx->auth_keylen = keys.authkeylen;
579 574
580 if (ctx->cipher_mode == DRV_CIPHER_CTR) { 575 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
581 /* the nonce is stored in bytes at end of key */ 576 /* the nonce is stored in bytes at end of key */
577 rc = -EINVAL;
582 if (ctx->enc_keylen < 578 if (ctx->enc_keylen <
583 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) 579 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
584 goto badkey; 580 goto badkey;
585 /* Copy nonce from last 4 bytes in CTR key to 581 /* Copy nonce from last 4 bytes in CTR key to
586 * first 4 bytes in CTR IV 582 * first 4 bytes in CTR IV
587 */ 583 */
588 memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + 584 memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
589 ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, 585 CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
590 CTR_RFC3686_NONCE_SIZE);
591 /* Set CTR key size */ 586 /* Set CTR key size */
592 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; 587 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
593 } 588 }
594 } else { /* non-authenc - has just one key */ 589 } else { /* non-authenc - has just one key */
590 enckey = key;
591 authkey = NULL;
595 ctx->enc_keylen = keylen; 592 ctx->enc_keylen = keylen;
596 ctx->auth_keylen = 0; 593 ctx->auth_keylen = 0;
597 } 594 }
@@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
603 /* STAT_PHASE_1: Copy key to ctx */ 600 /* STAT_PHASE_1: Copy key to ctx */
604 601
605 /* Get key material */ 602 /* Get key material */
606 memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); 603 memcpy(ctx->enckey, enckey, ctx->enc_keylen);
607 if (ctx->enc_keylen == 24) 604 if (ctx->enc_keylen == 24)
608 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); 605 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
609 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { 606 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
610 memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); 607 memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
608 ctx->auth_keylen);
611 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ 609 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
612 rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); 610 rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
613 if (rc) 611 if (rc)
614 goto badkey; 612 goto badkey;
615 } 613 }
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8ada308d72ee..b0125ad65825 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
380 rc = cc_ivgen_init(new_drvdata); 380 rc = cc_ivgen_init(new_drvdata);
381 if (rc) { 381 if (rc) {
382 dev_err(dev, "cc_ivgen_init failed\n"); 382 dev_err(dev, "cc_ivgen_init failed\n");
383 goto post_power_mgr_err; 383 goto post_buf_mgr_err;
384 } 384 }
385 385
386 /* Allocate crypto algs */ 386 /* Allocate crypto algs */
@@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
403 goto post_hash_err; 403 goto post_hash_err;
404 } 404 }
405 405
406 /* All set, we can allow autosuspend */
407 cc_pm_go(new_drvdata);
408
406 /* If we got here and FIPS mode is enabled 409 /* If we got here and FIPS mode is enabled
407 * it means all FIPS test passed, so let TEE 410 * it means all FIPS test passed, so let TEE
408 * know we're good. 411 * know we're good.
@@ -417,8 +420,6 @@ post_cipher_err:
417 cc_cipher_free(new_drvdata); 420 cc_cipher_free(new_drvdata);
418post_ivgen_err: 421post_ivgen_err:
419 cc_ivgen_fini(new_drvdata); 422 cc_ivgen_fini(new_drvdata);
420post_power_mgr_err:
421 cc_pm_fini(new_drvdata);
422post_buf_mgr_err: 423post_buf_mgr_err:
423 cc_buffer_mgr_fini(new_drvdata); 424 cc_buffer_mgr_fini(new_drvdata);
424post_req_mgr_err: 425post_req_mgr_err:
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d990f472e89f..6ff7e75ad90e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev)
100 100
101int cc_pm_init(struct cc_drvdata *drvdata) 101int cc_pm_init(struct cc_drvdata *drvdata)
102{ 102{
103 int rc = 0;
104 struct device *dev = drvdata_to_dev(drvdata); 103 struct device *dev = drvdata_to_dev(drvdata);
105 104
106 /* must be before the enabling to avoid resdundent suspending */ 105 /* must be before the enabling to avoid resdundent suspending */
107 pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); 106 pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
108 pm_runtime_use_autosuspend(dev); 107 pm_runtime_use_autosuspend(dev);
109 /* activate the PM module */ 108 /* activate the PM module */
110 rc = pm_runtime_set_active(dev); 109 return pm_runtime_set_active(dev);
111 if (rc) 110}
112 return rc;
113 /* enable the PM module*/
114 pm_runtime_enable(dev);
115 111
116 return rc; 112/* enable the PM module*/
113void cc_pm_go(struct cc_drvdata *drvdata)
114{
115 pm_runtime_enable(drvdata_to_dev(drvdata));
117} 116}
118 117
119void cc_pm_fini(struct cc_drvdata *drvdata) 118void cc_pm_fini(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 020a5403c58b..f62624357020 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -16,6 +16,7 @@
16extern const struct dev_pm_ops ccree_pm; 16extern const struct dev_pm_ops ccree_pm;
17 17
18int cc_pm_init(struct cc_drvdata *drvdata); 18int cc_pm_init(struct cc_drvdata *drvdata);
19void cc_pm_go(struct cc_drvdata *drvdata);
19void cc_pm_fini(struct cc_drvdata *drvdata); 20void cc_pm_fini(struct cc_drvdata *drvdata);
20int cc_pm_suspend(struct device *dev); 21int cc_pm_suspend(struct device *dev);
21int cc_pm_resume(struct device *dev); 22int cc_pm_resume(struct device *dev);
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
29 return 0; 30 return 0;
30} 31}
31 32
33static void cc_pm_go(struct cc_drvdata *drvdata) {}
34
32static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} 35static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
33 36
34static inline int cc_pm_suspend(struct device *dev) 37static inline int cc_pm_suspend(struct device *dev)
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index cdc4f9a171d9..adc0cd8ae97b 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
241 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); 241 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
242 } else { 242 } else {
243 /* new key */ 243 /* new key */
244 ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY, 244 ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
245 &ctx->pkey, GFP_KERNEL); 245 &ctx->pkey, GFP_KERNEL);
246 if (!ctx->key) { 246 if (!ctx->key) {
247 mutex_unlock(&ctx->lock); 247 mutex_unlock(&ctx->lock);
248 return -ENOMEM; 248 return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index c1ee4e7bf996..91ee2bb575df 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
1082 struct sec_queue_ring_db *ring_db = &queue->ring_db; 1082 struct sec_queue_ring_db *ring_db = &queue->ring_db;
1083 int ret; 1083 int ret;
1084 1084
1085 ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE, 1085 ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
1086 &ring_cmd->paddr, 1086 &ring_cmd->paddr, GFP_KERNEL);
1087 GFP_KERNEL);
1088 if (!ring_cmd->vaddr) 1087 if (!ring_cmd->vaddr)
1089 return -ENOMEM; 1088 return -ENOMEM;
1090 1089
@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
1092 mutex_init(&ring_cmd->lock); 1091 mutex_init(&ring_cmd->lock);
1093 ring_cmd->callback = sec_alg_callback; 1092 ring_cmd->callback = sec_alg_callback;
1094 1093
1095 ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE, 1094 ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
1096 &ring_cq->paddr, 1095 &ring_cq->paddr, GFP_KERNEL);
1097 GFP_KERNEL);
1098 if (!ring_cq->vaddr) { 1096 if (!ring_cq->vaddr) {
1099 ret = -ENOMEM; 1097 ret = -ENOMEM;
1100 goto err_free_ring_cmd; 1098 goto err_free_ring_cmd;
1101 } 1099 }
1102 1100
1103 ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE, 1101 ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
1104 &ring_db->paddr, 1102 &ring_db->paddr, GFP_KERNEL);
1105 GFP_KERNEL);
1106 if (!ring_db->vaddr) { 1103 if (!ring_db->vaddr) {
1107 ret = -ENOMEM; 1104 ret = -ENOMEM;
1108 goto err_free_ring_cq; 1105 goto err_free_ring_cq;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 19fba998b86b..1b0d156bb9be 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,9 +260,9 @@ static int setup_crypt_desc(void)
260{ 260{
261 struct device *dev = &pdev->dev; 261 struct device *dev = &pdev->dev;
262 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); 262 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
263 crypt_virt = dma_zalloc_coherent(dev, 263 crypt_virt = dma_alloc_coherent(dev,
264 NPE_QLEN * sizeof(struct crypt_ctl), 264 NPE_QLEN * sizeof(struct crypt_ctl),
265 &crypt_phys, GFP_ATOMIC); 265 &crypt_phys, GFP_ATOMIC);
266 if (!crypt_virt) 266 if (!crypt_virt)
267 return -ENOMEM; 267 return -ENOMEM;
268 return 0; 268 return 0;
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index ee0404e27a0f..5660e5e5e022 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
453 if (!ring[i]) 453 if (!ring[i])
454 goto err_cleanup; 454 goto err_cleanup;
455 455
456 ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, 456 ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
457 MTK_DESC_RING_SZ, 457 MTK_DESC_RING_SZ,
458 &ring[i]->cmd_dma, 458 &ring[i]->cmd_dma,
459 GFP_KERNEL); 459 GFP_KERNEL);
460 if (!ring[i]->cmd_base) 460 if (!ring[i]->cmd_base)
461 goto err_cleanup; 461 goto err_cleanup;
462 462
463 ring[i]->res_base = dma_zalloc_coherent(cryp->dev, 463 ring[i]->res_base = dma_alloc_coherent(cryp->dev,
464 MTK_DESC_RING_SZ, 464 MTK_DESC_RING_SZ,
465 &ring[i]->res_dma, 465 &ring[i]->res_dma,
466 GFP_KERNEL); 466 GFP_KERNEL);
467 if (!ring[i]->res_base) 467 if (!ring[i]->res_base)
468 goto err_cleanup; 468 goto err_cleanup;
469 469
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 3744b22f0c46..d28cba34773e 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
244 dev_to_node(&GET_DEV(accel_dev))); 244 dev_to_node(&GET_DEV(accel_dev)));
245 if (!admin) 245 if (!admin)
246 return -ENOMEM; 246 return -ENOMEM;
247 admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 247 admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
248 &admin->phy_addr, GFP_KERNEL); 248 &admin->phy_addr, GFP_KERNEL);
249 if (!admin->virt_addr) { 249 if (!admin->virt_addr) {
250 dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); 250 dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
251 kfree(admin); 251 kfree(admin);
252 return -ENOMEM; 252 return -ENOMEM;
253 } 253 }
254 254
255 admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), 255 admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
256 PAGE_SIZE, 256 PAGE_SIZE,
257 &admin->const_tbl_addr, 257 &admin->const_tbl_addr,
258 GFP_KERNEL); 258 GFP_KERNEL);
259 if (!admin->virt_tbl_addr) { 259 if (!admin->virt_tbl_addr) {
260 dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); 260 dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
261 dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 261 dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index d2698299896f..975c75198f56 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
601 601
602 dev = &GET_DEV(inst->accel_dev); 602 dev = &GET_DEV(inst->accel_dev);
603 ctx->inst = inst; 603 ctx->inst = inst;
604 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 604 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
605 &ctx->enc_cd_paddr, 605 &ctx->enc_cd_paddr,
606 GFP_ATOMIC); 606 GFP_ATOMIC);
607 if (!ctx->enc_cd) { 607 if (!ctx->enc_cd) {
608 return -ENOMEM; 608 return -ENOMEM;
609 } 609 }
610 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 610 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
611 &ctx->dec_cd_paddr, 611 &ctx->dec_cd_paddr,
612 GFP_ATOMIC); 612 GFP_ATOMIC);
613 if (!ctx->dec_cd) { 613 if (!ctx->dec_cd) {
614 goto out_free_enc; 614 goto out_free_enc;
615 } 615 }
@@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
933 933
934 dev = &GET_DEV(inst->accel_dev); 934 dev = &GET_DEV(inst->accel_dev);
935 ctx->inst = inst; 935 ctx->inst = inst;
936 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 936 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
937 &ctx->enc_cd_paddr, 937 &ctx->enc_cd_paddr,
938 GFP_ATOMIC); 938 GFP_ATOMIC);
939 if (!ctx->enc_cd) { 939 if (!ctx->enc_cd) {
940 spin_unlock(&ctx->lock); 940 spin_unlock(&ctx->lock);
941 return -ENOMEM; 941 return -ENOMEM;
942 } 942 }
943 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 943 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
944 &ctx->dec_cd_paddr, 944 &ctx->dec_cd_paddr,
945 GFP_ATOMIC); 945 GFP_ATOMIC);
946 if (!ctx->dec_cd) { 946 if (!ctx->dec_cd) {
947 spin_unlock(&ctx->lock); 947 spin_unlock(&ctx->lock);
948 goto out_free_enc; 948 goto out_free_enc;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 320e7854b4ee..c9f324730d71 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
332 } else { 332 } else {
333 int shift = ctx->p_size - req->src_len; 333 int shift = ctx->p_size - req->src_len;
334 334
335 qat_req->src_align = dma_zalloc_coherent(dev, 335 qat_req->src_align = dma_alloc_coherent(dev,
336 ctx->p_size, 336 ctx->p_size,
337 &qat_req->in.dh.in.b, 337 &qat_req->in.dh.in.b,
338 GFP_KERNEL); 338 GFP_KERNEL);
339 if (unlikely(!qat_req->src_align)) 339 if (unlikely(!qat_req->src_align))
340 return ret; 340 return ret;
341 341
@@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
360 goto unmap_src; 360 goto unmap_src;
361 361
362 } else { 362 } else {
363 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size, 363 qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
364 &qat_req->out.dh.r, 364 &qat_req->out.dh.r,
365 GFP_KERNEL); 365 GFP_KERNEL);
366 if (unlikely(!qat_req->dst_align)) 366 if (unlikely(!qat_req->dst_align))
367 goto unmap_src; 367 goto unmap_src;
368 } 368 }
@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
447 return -EINVAL; 447 return -EINVAL;
448 448
449 ctx->p_size = params->p_size; 449 ctx->p_size = params->p_size;
450 ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); 450 ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
451 if (!ctx->p) 451 if (!ctx->p)
452 return -ENOMEM; 452 return -ENOMEM;
453 memcpy(ctx->p, params->p, ctx->p_size); 453 memcpy(ctx->p, params->p, ctx->p_size);
@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
458 return 0; 458 return 0;
459 } 459 }
460 460
461 ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); 461 ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
462 if (!ctx->g) 462 if (!ctx->g)
463 return -ENOMEM; 463 return -ENOMEM;
464 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, 464 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
@@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
503 if (ret < 0) 503 if (ret < 0)
504 goto err_clear_ctx; 504 goto err_clear_ctx;
505 505
506 ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, 506 ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
507 GFP_KERNEL); 507 GFP_KERNEL);
508 if (!ctx->xa) { 508 if (!ctx->xa) {
509 ret = -ENOMEM; 509 ret = -ENOMEM;
510 goto err_clear_ctx; 510 goto err_clear_ctx;
@@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
737 } else { 737 } else {
738 int shift = ctx->key_sz - req->src_len; 738 int shift = ctx->key_sz - req->src_len;
739 739
740 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 740 qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
741 &qat_req->in.rsa.enc.m, 741 &qat_req->in.rsa.enc.m,
742 GFP_KERNEL); 742 GFP_KERNEL);
743 if (unlikely(!qat_req->src_align)) 743 if (unlikely(!qat_req->src_align))
744 return ret; 744 return ret;
745 745
@@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
756 goto unmap_src; 756 goto unmap_src;
757 757
758 } else { 758 } else {
759 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 759 qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
760 &qat_req->out.rsa.enc.c, 760 &qat_req->out.rsa.enc.c,
761 GFP_KERNEL); 761 GFP_KERNEL);
762 if (unlikely(!qat_req->dst_align)) 762 if (unlikely(!qat_req->dst_align))
763 goto unmap_src; 763 goto unmap_src;
764 764
@@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
881 } else { 881 } else {
882 int shift = ctx->key_sz - req->src_len; 882 int shift = ctx->key_sz - req->src_len;
883 883
884 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 884 qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
885 &qat_req->in.rsa.dec.c, 885 &qat_req->in.rsa.dec.c,
886 GFP_KERNEL); 886 GFP_KERNEL);
887 if (unlikely(!qat_req->src_align)) 887 if (unlikely(!qat_req->src_align))
888 return ret; 888 return ret;
889 889
@@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
900 goto unmap_src; 900 goto unmap_src;
901 901
902 } else { 902 } else {
903 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 903 qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
904 &qat_req->out.rsa.dec.m, 904 &qat_req->out.rsa.dec.m,
905 GFP_KERNEL); 905 GFP_KERNEL);
906 if (unlikely(!qat_req->dst_align)) 906 if (unlikely(!qat_req->dst_align))
907 goto unmap_src; 907 goto unmap_src;
908 908
@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
989 goto err; 989 goto err;
990 990
991 ret = -ENOMEM; 991 ret = -ENOMEM;
992 ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); 992 ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
993 if (!ctx->n) 993 if (!ctx->n)
994 goto err; 994 goto err;
995 995
@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
1018 return -EINVAL; 1018 return -EINVAL;
1019 } 1019 }
1020 1020
1021 ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); 1021 ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
1022 if (!ctx->e) 1022 if (!ctx->e)
1023 return -ENOMEM; 1023 return -ENOMEM;
1024 1024
@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
1044 goto err; 1044 goto err;
1045 1045
1046 ret = -ENOMEM; 1046 ret = -ENOMEM;
1047 ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); 1047 ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
1048 if (!ctx->d) 1048 if (!ctx->d)
1049 goto err; 1049 goto err;
1050 1050
@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1077 qat_rsa_drop_leading_zeros(&ptr, &len); 1077 qat_rsa_drop_leading_zeros(&ptr, &len);
1078 if (!len) 1078 if (!len)
1079 goto err; 1079 goto err;
1080 ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); 1080 ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
1081 if (!ctx->p) 1081 if (!ctx->p)
1082 goto err; 1082 goto err;
1083 memcpy(ctx->p + (half_key_sz - len), ptr, len); 1083 memcpy(ctx->p + (half_key_sz - len), ptr, len);
@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1088 qat_rsa_drop_leading_zeros(&ptr, &len); 1088 qat_rsa_drop_leading_zeros(&ptr, &len);
1089 if (!len) 1089 if (!len)
1090 goto free_p; 1090 goto free_p;
1091 ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); 1091 ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
1092 if (!ctx->q) 1092 if (!ctx->q)
1093 goto free_p; 1093 goto free_p;
1094 memcpy(ctx->q + (half_key_sz - len), ptr, len); 1094 memcpy(ctx->q + (half_key_sz - len), ptr, len);
@@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1099 qat_rsa_drop_leading_zeros(&ptr, &len); 1099 qat_rsa_drop_leading_zeros(&ptr, &len);
1100 if (!len) 1100 if (!len)
1101 goto free_q; 1101 goto free_q;
1102 ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, 1102 ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
1103 GFP_KERNEL); 1103 GFP_KERNEL);
1104 if (!ctx->dp) 1104 if (!ctx->dp)
1105 goto free_q; 1105 goto free_q;
1106 memcpy(ctx->dp + (half_key_sz - len), ptr, len); 1106 memcpy(ctx->dp + (half_key_sz - len), ptr, len);
@@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1111 qat_rsa_drop_leading_zeros(&ptr, &len); 1111 qat_rsa_drop_leading_zeros(&ptr, &len);
1112 if (!len) 1112 if (!len)
1113 goto free_dp; 1113 goto free_dp;
1114 ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, 1114 ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
1115 GFP_KERNEL); 1115 GFP_KERNEL);
1116 if (!ctx->dq) 1116 if (!ctx->dq)
1117 goto free_dp; 1117 goto free_dp;
1118 memcpy(ctx->dq + (half_key_sz - len), ptr, len); 1118 memcpy(ctx->dq + (half_key_sz - len), ptr, len);
@@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1123 qat_rsa_drop_leading_zeros(&ptr, &len); 1123 qat_rsa_drop_leading_zeros(&ptr, &len);
1124 if (!len) 1124 if (!len)
1125 goto free_dq; 1125 goto free_dq;
1126 ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, 1126 ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
1127 GFP_KERNEL); 1127 GFP_KERNEL);
1128 if (!ctx->qinv) 1128 if (!ctx->qinv)
1129 goto free_dq; 1129 goto free_dq;
1130 memcpy(ctx->qinv + (half_key_sz - len), ptr, len); 1130 memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 45e20707cef8..f8e2c5c3f4eb 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1361 struct talitos_private *priv = dev_get_drvdata(dev); 1361 struct talitos_private *priv = dev_get_drvdata(dev);
1362 bool is_sec1 = has_ftr_sec1(priv); 1362 bool is_sec1 = has_ftr_sec1(priv);
1363 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; 1363 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1364 void *err;
1365 1364
1366 if (cryptlen + authsize > max_len) { 1365 if (cryptlen + authsize > max_len) {
1367 dev_err(dev, "length exceeds h/w max limit\n"); 1366 dev_err(dev, "length exceeds h/w max limit\n");
1368 return ERR_PTR(-EINVAL); 1367 return ERR_PTR(-EINVAL);
1369 } 1368 }
1370 1369
1371 if (ivsize)
1372 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1373
1374 if (!dst || dst == src) { 1370 if (!dst || dst == src) {
1375 src_len = assoclen + cryptlen + authsize; 1371 src_len = assoclen + cryptlen + authsize;
1376 src_nents = sg_nents_for_len(src, src_len); 1372 src_nents = sg_nents_for_len(src, src_len);
1377 if (src_nents < 0) { 1373 if (src_nents < 0) {
1378 dev_err(dev, "Invalid number of src SG.\n"); 1374 dev_err(dev, "Invalid number of src SG.\n");
1379 err = ERR_PTR(-EINVAL); 1375 return ERR_PTR(-EINVAL);
1380 goto error_sg;
1381 } 1376 }
1382 src_nents = (src_nents == 1) ? 0 : src_nents; 1377 src_nents = (src_nents == 1) ? 0 : src_nents;
1383 dst_nents = dst ? src_nents : 0; 1378 dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1387 src_nents = sg_nents_for_len(src, src_len); 1382 src_nents = sg_nents_for_len(src, src_len);
1388 if (src_nents < 0) { 1383 if (src_nents < 0) {
1389 dev_err(dev, "Invalid number of src SG.\n"); 1384 dev_err(dev, "Invalid number of src SG.\n");
1390 err = ERR_PTR(-EINVAL); 1385 return ERR_PTR(-EINVAL);
1391 goto error_sg;
1392 } 1386 }
1393 src_nents = (src_nents == 1) ? 0 : src_nents; 1387 src_nents = (src_nents == 1) ? 0 : src_nents;
1394 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); 1388 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1395 dst_nents = sg_nents_for_len(dst, dst_len); 1389 dst_nents = sg_nents_for_len(dst, dst_len);
1396 if (dst_nents < 0) { 1390 if (dst_nents < 0) {
1397 dev_err(dev, "Invalid number of dst SG.\n"); 1391 dev_err(dev, "Invalid number of dst SG.\n");
1398 err = ERR_PTR(-EINVAL); 1392 return ERR_PTR(-EINVAL);
1399 goto error_sg;
1400 } 1393 }
1401 dst_nents = (dst_nents == 1) ? 0 : dst_nents; 1394 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1402 } 1395 }
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1423 /* if its a ahash, add space for a second desc next to the first one */ 1416 /* if its a ahash, add space for a second desc next to the first one */
1424 if (is_sec1 && !dst) 1417 if (is_sec1 && !dst)
1425 alloc_len += sizeof(struct talitos_desc); 1418 alloc_len += sizeof(struct talitos_desc);
1419 alloc_len += ivsize;
1426 1420
1427 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1421 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1428 if (!edesc) { 1422 if (!edesc)
1429 err = ERR_PTR(-ENOMEM); 1423 return ERR_PTR(-ENOMEM);
1430 goto error_sg; 1424 if (ivsize) {
1425 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1426 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1431 } 1427 }
1432 memset(&edesc->desc, 0, sizeof(edesc->desc)); 1428 memset(&edesc->desc, 0, sizeof(edesc->desc));
1433 1429
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1445 DMA_BIDIRECTIONAL); 1441 DMA_BIDIRECTIONAL);
1446 } 1442 }
1447 return edesc; 1443 return edesc;
1448error_sg:
1449 if (iv_dma)
1450 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1451 return err;
1452} 1444}
1453 1445
1454static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, 1446static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4e557684f792..fe69dccfa0c0 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@ struct at_xdmac_chan {
203 u32 save_cim; 203 u32 save_cim;
204 u32 save_cnda; 204 u32 save_cnda;
205 u32 save_cndc; 205 u32 save_cndc;
206 u32 irq_status;
206 unsigned long status; 207 unsigned long status;
207 struct tasklet_struct tasklet; 208 struct tasklet_struct tasklet;
208 struct dma_slave_config sconfig; 209 struct dma_slave_config sconfig;
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
1580 struct at_xdmac_desc *desc; 1581 struct at_xdmac_desc *desc;
1581 u32 error_mask; 1582 u32 error_mask;
1582 1583
1583 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", 1584 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1584 __func__, atchan->status); 1585 __func__, atchan->irq_status);
1585 1586
1586 error_mask = AT_XDMAC_CIS_RBEIS 1587 error_mask = AT_XDMAC_CIS_RBEIS
1587 | AT_XDMAC_CIS_WBEIS 1588 | AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
1589 1590
1590 if (at_xdmac_chan_is_cyclic(atchan)) { 1591 if (at_xdmac_chan_is_cyclic(atchan)) {
1591 at_xdmac_handle_cyclic(atchan); 1592 at_xdmac_handle_cyclic(atchan);
1592 } else if ((atchan->status & AT_XDMAC_CIS_LIS) 1593 } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
1593 || (atchan->status & error_mask)) { 1594 || (atchan->irq_status & error_mask)) {
1594 struct dma_async_tx_descriptor *txd; 1595 struct dma_async_tx_descriptor *txd;
1595 1596
1596 if (atchan->status & AT_XDMAC_CIS_RBEIS) 1597 if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1597 dev_err(chan2dev(&atchan->chan), "read bus error!!!"); 1598 dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1598 if (atchan->status & AT_XDMAC_CIS_WBEIS) 1599 if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1599 dev_err(chan2dev(&atchan->chan), "write bus error!!!"); 1600 dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1600 if (atchan->status & AT_XDMAC_CIS_ROIS) 1601 if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1601 dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); 1602 dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1602 1603
1603 spin_lock(&atchan->lock); 1604 spin_lock(&atchan->lock);
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1652 atchan = &atxdmac->chan[i]; 1653 atchan = &atxdmac->chan[i];
1653 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); 1654 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1654 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); 1655 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1655 atchan->status = chan_status & chan_imr; 1656 atchan->irq_status = chan_status & chan_imr;
1656 dev_vdbg(atxdmac->dma.dev, 1657 dev_vdbg(atxdmac->dma.dev,
1657 "%s: chan%d: imr=0x%x, status=0x%x\n", 1658 "%s: chan%d: imr=0x%x, status=0x%x\n",
1658 __func__, i, chan_imr, chan_status); 1659 __func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1666 at_xdmac_chan_read(atchan, AT_XDMAC_CDA), 1667 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1667 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); 1668 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1668 1669
1669 if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) 1670 if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1670 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); 1671 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1671 1672
1672 tasklet_schedule(&atchan->tasklet); 1673 tasklet_schedule(&atchan->tasklet);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 1a44c8086d77..ae10f5614f95 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
406 } 406 }
407} 407}
408 408
409static int bcm2835_dma_abort(void __iomem *chan_base) 409static int bcm2835_dma_abort(struct bcm2835_chan *c)
410{ 410{
411 unsigned long cs; 411 void __iomem *chan_base = c->chan_base;
412 long int timeout = 10000; 412 long int timeout = 10000;
413 413
414 cs = readl(chan_base + BCM2835_DMA_CS); 414 /*
415 if (!(cs & BCM2835_DMA_ACTIVE)) 415 * A zero control block address means the channel is idle.
416 * (The ACTIVE flag in the CS register is not a reliable indicator.)
417 */
418 if (!readl(chan_base + BCM2835_DMA_ADDR))
416 return 0; 419 return 0;
417 420
418 /* Write 0 to the active bit - Pause the DMA */ 421 /* Write 0 to the active bit - Pause the DMA */
419 writel(0, chan_base + BCM2835_DMA_CS); 422 writel(0, chan_base + BCM2835_DMA_CS);
420 423
421 /* Wait for any current AXI transfer to complete */ 424 /* Wait for any current AXI transfer to complete */
422 while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { 425 while ((readl(chan_base + BCM2835_DMA_CS) &
426 BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
423 cpu_relax(); 427 cpu_relax();
424 cs = readl(chan_base + BCM2835_DMA_CS);
425 }
426 428
427 /* We'll un-pause when we set of our next DMA */ 429 /* Peripheral might be stuck and fail to signal AXI write responses */
428 if (!timeout) 430 if (!timeout)
429 return -ETIMEDOUT; 431 dev_err(c->vc.chan.device->dev,
430 432 "failed to complete outstanding writes\n");
431 if (!(cs & BCM2835_DMA_ACTIVE))
432 return 0;
433
434 /* Terminate the control block chain */
435 writel(0, chan_base + BCM2835_DMA_NEXTCB);
436
437 /* Abort the whole DMA */
438 writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
439 chan_base + BCM2835_DMA_CS);
440 433
434 writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
441 return 0; 435 return 0;
442} 436}
443 437
@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
476 470
477 spin_lock_irqsave(&c->vc.lock, flags); 471 spin_lock_irqsave(&c->vc.lock, flags);
478 472
479 /* Acknowledge interrupt */ 473 /*
480 writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); 474 * Clear the INT flag to receive further interrupts. Keep the channel
475 * active in case the descriptor is cyclic or in case the client has
476 * already terminated the descriptor and issued a new one. (May happen
477 * if this IRQ handler is threaded.) If the channel is finished, it
478 * will remain idle despite the ACTIVE flag being set.
479 */
480 writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
481 c->chan_base + BCM2835_DMA_CS);
481 482
482 d = c->desc; 483 d = c->desc;
483 484
@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
485 if (d->cyclic) { 486 if (d->cyclic) {
486 /* call the cyclic callback */ 487 /* call the cyclic callback */
487 vchan_cyclic_callback(&d->vd); 488 vchan_cyclic_callback(&d->vd);
488 489 } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
489 /* Keep the DMA engine running */
490 writel(BCM2835_DMA_ACTIVE,
491 c->chan_base + BCM2835_DMA_CS);
492 } else {
493 vchan_cookie_complete(&c->desc->vd); 490 vchan_cookie_complete(&c->desc->vd);
494 bcm2835_dma_start_desc(c); 491 bcm2835_dma_start_desc(c);
495 } 492 }
@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
779 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 776 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
780 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); 777 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
781 unsigned long flags; 778 unsigned long flags;
782 int timeout = 10000;
783 LIST_HEAD(head); 779 LIST_HEAD(head);
784 780
785 spin_lock_irqsave(&c->vc.lock, flags); 781 spin_lock_irqsave(&c->vc.lock, flags);
@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
789 list_del_init(&c->node); 785 list_del_init(&c->node);
790 spin_unlock(&d->lock); 786 spin_unlock(&d->lock);
791 787
792 /* 788 /* stop DMA activity */
793 * Stop DMA activity: we assume the callback will not be called
794 * after bcm_dma_abort() returns (even if it does, it will see
795 * c->desc is NULL and exit.)
796 */
797 if (c->desc) { 789 if (c->desc) {
798 vchan_terminate_vdesc(&c->desc->vd); 790 vchan_terminate_vdesc(&c->desc->vd);
799 c->desc = NULL; 791 c->desc = NULL;
800 bcm2835_dma_abort(c->chan_base); 792 bcm2835_dma_abort(c);
801
802 /* Wait for stopping */
803 while (--timeout) {
804 if (!(readl(c->chan_base + BCM2835_DMA_CS) &
805 BCM2835_DMA_ACTIVE))
806 break;
807
808 cpu_relax();
809 }
810
811 if (!timeout)
812 dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
813 } 793 }
814 794
815 vchan_get_all_descriptors(&c->vc, &head); 795 vchan_get_all_descriptors(&c->vc, &head);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 2eea4ef72915..6511928b4cdf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -711,11 +711,9 @@ static int dmatest_func(void *data)
711 srcs[i] = um->addr[i] + src_off; 711 srcs[i] = um->addr[i] + src_off;
712 ret = dma_mapping_error(dev->dev, um->addr[i]); 712 ret = dma_mapping_error(dev->dev, um->addr[i]);
713 if (ret) { 713 if (ret) {
714 dmaengine_unmap_put(um);
715 result("src mapping error", total_tests, 714 result("src mapping error", total_tests,
716 src_off, dst_off, len, ret); 715 src_off, dst_off, len, ret);
717 failed_tests++; 716 goto error_unmap_continue;
718 continue;
719 } 717 }
720 um->to_cnt++; 718 um->to_cnt++;
721 } 719 }
@@ -730,11 +728,9 @@ static int dmatest_func(void *data)
730 DMA_BIDIRECTIONAL); 728 DMA_BIDIRECTIONAL);
731 ret = dma_mapping_error(dev->dev, dsts[i]); 729 ret = dma_mapping_error(dev->dev, dsts[i]);
732 if (ret) { 730 if (ret) {
733 dmaengine_unmap_put(um);
734 result("dst mapping error", total_tests, 731 result("dst mapping error", total_tests,
735 src_off, dst_off, len, ret); 732 src_off, dst_off, len, ret);
736 failed_tests++; 733 goto error_unmap_continue;
737 continue;
738 } 734 }
739 um->bidi_cnt++; 735 um->bidi_cnt++;
740 } 736 }
@@ -762,12 +758,10 @@ static int dmatest_func(void *data)
762 } 758 }
763 759
764 if (!tx) { 760 if (!tx) {
765 dmaengine_unmap_put(um);
766 result("prep error", total_tests, src_off, 761 result("prep error", total_tests, src_off,
767 dst_off, len, ret); 762 dst_off, len, ret);
768 msleep(100); 763 msleep(100);
769 failed_tests++; 764 goto error_unmap_continue;
770 continue;
771 } 765 }
772 766
773 done->done = false; 767 done->done = false;
@@ -776,12 +770,10 @@ static int dmatest_func(void *data)
776 cookie = tx->tx_submit(tx); 770 cookie = tx->tx_submit(tx);
777 771
778 if (dma_submit_error(cookie)) { 772 if (dma_submit_error(cookie)) {
779 dmaengine_unmap_put(um);
780 result("submit error", total_tests, src_off, 773 result("submit error", total_tests, src_off,
781 dst_off, len, ret); 774 dst_off, len, ret);
782 msleep(100); 775 msleep(100);
783 failed_tests++; 776 goto error_unmap_continue;
784 continue;
785 } 777 }
786 dma_async_issue_pending(chan); 778 dma_async_issue_pending(chan);
787 779
@@ -790,22 +782,20 @@ static int dmatest_func(void *data)
790 782
791 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 783 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
792 784
793 dmaengine_unmap_put(um);
794
795 if (!done->done) { 785 if (!done->done) {
796 result("test timed out", total_tests, src_off, dst_off, 786 result("test timed out", total_tests, src_off, dst_off,
797 len, 0); 787 len, 0);
798 failed_tests++; 788 goto error_unmap_continue;
799 continue;
800 } else if (status != DMA_COMPLETE) { 789 } else if (status != DMA_COMPLETE) {
801 result(status == DMA_ERROR ? 790 result(status == DMA_ERROR ?
802 "completion error status" : 791 "completion error status" :
803 "completion busy status", total_tests, src_off, 792 "completion busy status", total_tests, src_off,
804 dst_off, len, ret); 793 dst_off, len, ret);
805 failed_tests++; 794 goto error_unmap_continue;
806 continue;
807 } 795 }
808 796
797 dmaengine_unmap_put(um);
798
809 if (params->noverify) { 799 if (params->noverify) {
810 verbose_result("test passed", total_tests, src_off, 800 verbose_result("test passed", total_tests, src_off,
811 dst_off, len, 0); 801 dst_off, len, 0);
@@ -846,6 +836,12 @@ static int dmatest_func(void *data)
846 verbose_result("test passed", total_tests, src_off, 836 verbose_result("test passed", total_tests, src_off,
847 dst_off, len, 0); 837 dst_off, len, 0);
848 } 838 }
839
840 continue;
841
842error_unmap_continue:
843 dmaengine_unmap_put(um);
844 failed_tests++;
849 } 845 }
850 ktime = ktime_sub(ktime_get(), ktime); 846 ktime = ktime_sub(ktime_get(), ktime);
851 ktime = ktime_sub(ktime, comparetime); 847 ktime = ktime_sub(ktime, comparetime);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index c2fff3f6c9ca..4a09af3cd546 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data)
618{ 618{
619 struct imxdma_channel *imxdmac = (void *)data; 619 struct imxdma_channel *imxdmac = (void *)data;
620 struct imxdma_engine *imxdma = imxdmac->imxdma; 620 struct imxdma_engine *imxdma = imxdmac->imxdma;
621 struct imxdma_desc *desc; 621 struct imxdma_desc *desc, *next_desc;
622 unsigned long flags; 622 unsigned long flags;
623 623
624 spin_lock_irqsave(&imxdma->lock, flags); 624 spin_lock_irqsave(&imxdma->lock, flags);
@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data)
648 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); 648 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
649 649
650 if (!list_empty(&imxdmac->ld_queue)) { 650 if (!list_empty(&imxdmac->ld_queue)) {
651 desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, 651 next_desc = list_first_entry(&imxdmac->ld_queue,
652 node); 652 struct imxdma_desc, node);
653 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); 653 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
654 if (imxdma_xfer_desc(desc) < 0) 654 if (imxdma_xfer_desc(next_desc) < 0)
655 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", 655 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
656 __func__, imxdmac->channel); 656 __func__, imxdmac->channel);
657 } 657 }
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a2b0a0e71168..86708fb9bda1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
1182{ 1182{
1183 int ret = -EBUSY; 1183 int ret = -EBUSY;
1184 1184
1185 sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 1185 sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
1186 GFP_NOWAIT); 1186 GFP_NOWAIT);
1187 if (!sdma->bd0) { 1187 if (!sdma->bd0) {
1188 ret = -ENOMEM; 1188 ret = -ENOMEM;
1189 goto out; 1189 goto out;
@@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
1205 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 1205 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1206 int ret = 0; 1206 int ret = 0;
1207 1207
1208 desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, 1208 desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
1209 GFP_NOWAIT); 1209 GFP_NOWAIT);
1210 if (!desc->bd) { 1210 if (!desc->bd) {
1211 ret = -ENOMEM; 1211 ret = -ENOMEM;
1212 goto out; 1212 goto out;
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index b7ec56ae02a6..1a2028e1c29e 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
325 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. 325 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
326 */ 326 */
327 pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); 327 pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
328 ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring, 328 ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
329 &ring->tphys, GFP_NOWAIT); 329 &ring->tphys, GFP_NOWAIT);
330 if (!ring->txd) 330 if (!ring->txd)
331 return -ENOMEM; 331 return -ENOMEM;
332 332
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 35193b31a9e0..22cc7f68ef6e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
416 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 416 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
417 int ret; 417 int ret;
418 418
419 mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, 419 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
420 CCW_BLOCK_SIZE, 420 CCW_BLOCK_SIZE,
421 &mxs_chan->ccw_phys, GFP_KERNEL); 421 &mxs_chan->ccw_phys, GFP_KERNEL);
422 if (!mxs_chan->ccw) { 422 if (!mxs_chan->ccw) {
423 ret = -ENOMEM; 423 ret = -ENOMEM;
424 goto err_alloc; 424 goto err_alloc;
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 1d5988849aa6..eafd6c4b90fe 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
1208 ring->size = ret; 1208 ring->size = ret;
1209 1209
1210 /* Allocate memory for DMA ring descriptor */ 1210 /* Allocate memory for DMA ring descriptor */
1211 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, 1211 ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
1212 &ring->desc_paddr, GFP_KERNEL); 1212 &ring->desc_paddr, GFP_KERNEL);
1213 if (!ring->desc_vaddr) { 1213 if (!ring->desc_vaddr) {
1214 chan_err(chan, "Failed to allocate ring desc\n"); 1214 chan_err(chan, "Failed to allocate ring desc\n");
1215 return -ENOMEM; 1215 return -ENOMEM;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 02880963092f..cb20b411493e 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
879 */ 879 */
880 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 880 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
881 /* Allocate the buffer descriptors. */ 881 /* Allocate the buffer descriptors. */
882 chan->seg_v = dma_zalloc_coherent(chan->dev, 882 chan->seg_v = dma_alloc_coherent(chan->dev,
883 sizeof(*chan->seg_v) * 883 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
884 XILINX_DMA_NUM_DESCS, 884 &chan->seg_p, GFP_KERNEL);
885 &chan->seg_p, GFP_KERNEL);
886 if (!chan->seg_v) { 885 if (!chan->seg_v) {
887 dev_err(chan->dev, 886 dev_err(chan->dev,
888 "unable to allocate channel %d descriptors\n", 887 "unable to allocate channel %d descriptors\n",
@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
895 * so allocating a desc segment during channel allocation for 894 * so allocating a desc segment during channel allocation for
896 * programming tail descriptor. 895 * programming tail descriptor.
897 */ 896 */
898 chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, 897 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
899 sizeof(*chan->cyclic_seg_v), 898 sizeof(*chan->cyclic_seg_v),
900 &chan->cyclic_seg_p, GFP_KERNEL); 899 &chan->cyclic_seg_p,
900 GFP_KERNEL);
901 if (!chan->cyclic_seg_v) { 901 if (!chan->cyclic_seg_v) {
902 dev_err(chan->dev, 902 dev_err(chan->dev,
903 "unable to allocate desc segment for cyclic DMA\n"); 903 "unable to allocate desc segment for cyclic DMA\n");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 8db51750ce93..4478787a247f 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
490 list_add_tail(&desc->node, &chan->free_list); 490 list_add_tail(&desc->node, &chan->free_list);
491 } 491 }
492 492
493 chan->desc_pool_v = dma_zalloc_coherent(chan->dev, 493 chan->desc_pool_v = dma_alloc_coherent(chan->dev,
494 (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), 494 (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
495 &chan->desc_pool_p, GFP_KERNEL); 495 &chan->desc_pool_p, GFP_KERNEL);
496 if (!chan->desc_pool_v) 496 if (!chan->desc_pool_v)
497 return -ENOMEM; 497 return -ENOMEM;
498 498
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 4213cb0bb2a7..f8664bac9fa8 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -295,8 +295,8 @@ struct altr_sdram_mc_data {
295#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 295#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
296 296
297/* Sticky registers for Uncorrected Errors */ 297/* Sticky registers for Uncorrected Errors */
298#define S10_SYSMGR_UE_VAL_OFST 0x120 298#define S10_SYSMGR_UE_VAL_OFST 0x220
299#define S10_SYSMGR_UE_ADDR_OFST 0x124 299#define S10_SYSMGR_UE_ADDR_OFST 0x224
300 300
301#define S10_DDR0_IRQ_MASK BIT(16) 301#define S10_DDR0_IRQ_MASK BIT(16)
302 302
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 09b845e90114..a785ffd5af89 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1144,10 +1144,6 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
1144 if (device->is_local) 1144 if (device->is_local)
1145 return -ENODEV; 1145 return -ENODEV;
1146 1146
1147 if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
1148 WARN_ON(dma_set_max_seg_size(device->card->device,
1149 SBP2_MAX_SEG_SIZE));
1150
1151 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); 1147 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
1152 if (shost == NULL) 1148 if (shost == NULL)
1153 return -ENOMEM; 1149 return -ENOMEM;
@@ -1610,6 +1606,7 @@ static struct scsi_host_template scsi_driver_template = {
1610 .eh_abort_handler = sbp2_scsi_abort, 1606 .eh_abort_handler = sbp2_scsi_abort,
1611 .this_id = -1, 1607 .this_id = -1,
1612 .sg_tablesize = SG_ALL, 1608 .sg_tablesize = SG_ALL,
1609 .max_segment_size = SBP2_MAX_SEG_SIZE,
1613 .can_queue = 1, 1610 .can_queue = 1,
1614 .sdev_attrs = sbp2_scsi_sysfs_attrs, 1611 .sdev_attrs = sbp2_scsi_sysfs_attrs,
1615}; 1612};
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 472c88ae1c0f..92f843eaf1e0 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
119} 119}
120EXPORT_SYMBOL_GPL(scmi_driver_unregister); 120EXPORT_SYMBOL_GPL(scmi_driver_unregister);
121 121
122static void scmi_device_release(struct device *dev)
123{
124 kfree(to_scmi_dev(dev));
125}
126
122struct scmi_device * 127struct scmi_device *
123scmi_device_create(struct device_node *np, struct device *parent, int protocol) 128scmi_device_create(struct device_node *np, struct device *parent, int protocol)
124{ 129{
@@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
138 scmi_dev->dev.parent = parent; 143 scmi_dev->dev.parent = parent;
139 scmi_dev->dev.of_node = np; 144 scmi_dev->dev.of_node = np;
140 scmi_dev->dev.bus = &scmi_bus_type; 145 scmi_dev->dev.bus = &scmi_bus_type;
146 scmi_dev->dev.release = scmi_device_release;
141 dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); 147 dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
142 148
143 retval = device_register(&scmi_dev->dev); 149 retval = device_register(&scmi_dev->dev);
@@ -156,9 +162,8 @@ free_mem:
156void scmi_device_destroy(struct scmi_device *scmi_dev) 162void scmi_device_destroy(struct scmi_device *scmi_dev)
157{ 163{
158 scmi_handle_put(scmi_dev->handle); 164 scmi_handle_put(scmi_dev->handle);
159 device_unregister(&scmi_dev->dev);
160 ida_simple_remove(&scmi_bus_id, scmi_dev->id); 165 ida_simple_remove(&scmi_bus_id, scmi_dev->id);
161 kfree(scmi_dev); 166 device_unregister(&scmi_dev->dev);
162} 167}
163 168
164void scmi_set_handle(struct scmi_device *scmi_dev) 169void scmi_set_handle(struct scmi_device *scmi_dev)
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 23ea1ed409d1..352bd2473162 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -37,8 +37,9 @@ extern u64 efi_system_table;
37static struct ptdump_info efi_ptdump_info = { 37static struct ptdump_info efi_ptdump_info = {
38 .mm = &efi_mm, 38 .mm = &efi_mm,
39 .markers = (struct addr_marker[]){ 39 .markers = (struct addr_marker[]){
40 { 0, "UEFI runtime start" }, 40 { 0, "UEFI runtime start" },
41 { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" } 41 { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" },
42 { -1, NULL }
42 }, 43 },
43 .base_addr = 0, 44 .base_addr = 0,
44}; 45};
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4c46ff6f2242..55b77c576c42 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
592 592
593 early_memunmap(tbl, sizeof(*tbl)); 593 early_memunmap(tbl, sizeof(*tbl));
594 } 594 }
595 return 0;
596}
597 595
598int __init efi_apply_persistent_mem_reservations(void)
599{
600 if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { 596 if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
601 unsigned long prsv = efi.mem_reserve; 597 unsigned long prsv = efi.mem_reserve;
602 598
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index eee42d5e25ee..c037c6c5d0b7 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
75 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; 75 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
76 efi_status_t status; 76 efi_status_t status;
77 77
78 if (IS_ENABLED(CONFIG_ARM))
79 return;
80
81 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), 78 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
82 (void **)&rsv); 79 (void **)&rsv);
83 if (status != EFI_SUCCESS) { 80 if (status != EFI_SUCCESS) {
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 8903b9ccfc2b..e2abfdb5cee6 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
147static DEFINE_SEMAPHORE(efi_runtime_lock); 147static DEFINE_SEMAPHORE(efi_runtime_lock);
148 148
149/* 149/*
150 * Expose the EFI runtime lock to the UV platform
151 */
152#ifdef CONFIG_X86_UV
153extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
154#endif
155
156/*
150 * Calls the appropriate efi_runtime_service() with the appropriate 157 * Calls the appropriate efi_runtime_service() with the appropriate
151 * arguments. 158 * arguments.
152 * 159 *
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index a1a09e04fab8..13851b3d1c56 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -508,14 +508,11 @@ static int __init s10_init(void)
508 return -ENODEV; 508 return -ENODEV;
509 509
510 np = of_find_matching_node(fw_np, s10_of_match); 510 np = of_find_matching_node(fw_np, s10_of_match);
511 if (!np) { 511 if (!np)
512 of_node_put(fw_np);
513 return -ENODEV; 512 return -ENODEV;
514 }
515 513
516 of_node_put(np); 514 of_node_put(np);
517 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); 515 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
518 of_node_put(fw_np);
519 if (ret) 516 if (ret)
520 return ret; 517 return ret;
521 518
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 6b11f1314248..7f9e0304b510 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
66static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, 66static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
67 unsigned int nr, int value) 67 unsigned int nr, int value)
68{ 68{
69 if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) 69 if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
70 altr_a10sr_gpio_set(gc, nr, value);
70 return 0; 71 return 0;
72 }
71 return -EINVAL; 73 return -EINVAL;
72} 74}
73 75
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e0d6a0a7bc69..e41223c05f6e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
180 180
181static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) 181static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
182{ 182{
183 return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); 183 struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
184
185 switch (sprd_eic->type) {
186 case SPRD_EIC_DEBOUNCE:
187 return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
188 case SPRD_EIC_ASYNC:
189 return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
190 case SPRD_EIC_SYNC:
191 return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
192 default:
193 return -ENOTSUPP;
194 }
184} 195}
185 196
186static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) 197static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
368 irq_set_handler_locked(data, handle_edge_irq); 379 irq_set_handler_locked(data, handle_edge_irq);
369 break; 380 break;
370 case IRQ_TYPE_EDGE_BOTH: 381 case IRQ_TYPE_EDGE_BOTH:
382 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
371 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); 383 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
372 irq_set_handler_locked(data, handle_edge_irq); 384 irq_set_handler_locked(data, handle_edge_irq);
373 break; 385 break;
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 00e954f22bc9..74401e0adb29 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -30,6 +30,7 @@
30#define GPIO_REG_EDGE 0xA0 30#define GPIO_REG_EDGE 0xA0
31 31
32struct mtk_gc { 32struct mtk_gc {
33 struct irq_chip irq_chip;
33 struct gpio_chip chip; 34 struct gpio_chip chip;
34 spinlock_t lock; 35 spinlock_t lock;
35 int bank; 36 int bank;
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
189 return 0; 190 return 0;
190} 191}
191 192
192static struct irq_chip mediatek_gpio_irq_chip = {
193 .irq_unmask = mediatek_gpio_irq_unmask,
194 .irq_mask = mediatek_gpio_irq_mask,
195 .irq_mask_ack = mediatek_gpio_irq_mask,
196 .irq_set_type = mediatek_gpio_irq_type,
197};
198
199static int 193static int
200mediatek_gpio_xlate(struct gpio_chip *chip, 194mediatek_gpio_xlate(struct gpio_chip *chip,
201 const struct of_phandle_args *spec, u32 *flags) 195 const struct of_phandle_args *spec, u32 *flags)
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
254 return ret; 248 return ret;
255 } 249 }
256 250
251 rg->irq_chip.name = dev_name(dev);
252 rg->irq_chip.parent_device = dev;
253 rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
254 rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
255 rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
256 rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
257
257 if (mtk->gpio_irq) { 258 if (mtk->gpio_irq) {
258 /* 259 /*
259 * Manually request the irq here instead of passing 260 * Manually request the irq here instead of passing
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
270 return ret; 271 return ret;
271 } 272 }
272 273
273 ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip, 274 ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
274 0, handle_simple_irq, IRQ_TYPE_NONE); 275 0, handle_simple_irq, IRQ_TYPE_NONE);
275 if (ret) { 276 if (ret) {
276 dev_err(dev, "failed to add gpiochip_irqchip\n"); 277 dev_err(dev, "failed to add gpiochip_irqchip\n");
277 return ret; 278 return ret;
278 } 279 }
279 280
280 gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip, 281 gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
281 mtk->gpio_irq, NULL); 282 mtk->gpio_irq, NULL);
282 } 283 }
283 284
@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
310 mtk->gpio_irq = irq_of_parse_and_map(np, 0); 311 mtk->gpio_irq = irq_of_parse_and_map(np, 0);
311 mtk->dev = dev; 312 mtk->dev = dev;
312 platform_set_drvdata(pdev, mtk); 313 platform_set_drvdata(pdev, mtk);
313 mediatek_gpio_irq_chip.name = dev_name(dev);
314 314
315 for (i = 0; i < MTK_BANK_CNT; i++) { 315 for (i = 0; i < MTK_BANK_CNT; i++) {
316 ret = mediatek_gpio_bank_probe(dev, np, i); 316 ret = mediatek_gpio_bank_probe(dev, np, i);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 83617fdc661d..0dc96419efe3 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -289,7 +289,7 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
289 return pca953x_check_register(chip, reg, bank); 289 return pca953x_check_register(chip, reg, bank);
290} 290}
291 291
292const struct regmap_config pca953x_i2c_regmap = { 292static const struct regmap_config pca953x_i2c_regmap = {
293 .reg_bits = 8, 293 .reg_bits = 8,
294 .val_bits = 8, 294 .val_bits = 8,
295 295
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index adf72dda25a2..68a35b65925a 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
84 */ 84 */
85struct pcf857x { 85struct pcf857x {
86 struct gpio_chip chip; 86 struct gpio_chip chip;
87 struct irq_chip irqchip;
87 struct i2c_client *client; 88 struct i2c_client *client;
88 struct mutex lock; /* protect 'out' */ 89 struct mutex lock; /* protect 'out' */
89 unsigned out; /* software latch */ 90 unsigned out; /* software latch */
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
252 mutex_unlock(&gpio->lock); 253 mutex_unlock(&gpio->lock);
253} 254}
254 255
255static struct irq_chip pcf857x_irq_chip = {
256 .name = "pcf857x",
257 .irq_enable = pcf857x_irq_enable,
258 .irq_disable = pcf857x_irq_disable,
259 .irq_ack = noop,
260 .irq_mask = noop,
261 .irq_unmask = noop,
262 .irq_set_wake = pcf857x_irq_set_wake,
263 .irq_bus_lock = pcf857x_irq_bus_lock,
264 .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
265};
266
267/*-------------------------------------------------------------------------*/ 256/*-------------------------------------------------------------------------*/
268 257
269static int pcf857x_probe(struct i2c_client *client, 258static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
376 365
377 /* Enable irqchip if we have an interrupt */ 366 /* Enable irqchip if we have an interrupt */
378 if (client->irq) { 367 if (client->irq) {
368 gpio->irqchip.name = "pcf857x",
369 gpio->irqchip.irq_enable = pcf857x_irq_enable,
370 gpio->irqchip.irq_disable = pcf857x_irq_disable,
371 gpio->irqchip.irq_ack = noop,
372 gpio->irqchip.irq_mask = noop,
373 gpio->irqchip.irq_unmask = noop,
374 gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
375 gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
376 gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
379 status = gpiochip_irqchip_add_nested(&gpio->chip, 377 status = gpiochip_irqchip_add_nested(&gpio->chip,
380 &pcf857x_irq_chip, 378 &gpio->irqchip,
381 0, handle_level_irq, 379 0, handle_level_irq,
382 IRQ_TYPE_NONE); 380 IRQ_TYPE_NONE);
383 if (status) { 381 if (status) {
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
392 if (status) 390 if (status)
393 goto fail; 391 goto fail;
394 392
395 gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, 393 gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
396 client->irq); 394 client->irq);
397 gpio->irq_parent = client->irq; 395 gpio->irq_parent = client->irq;
398 } 396 }
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index e9600b556f39..bcc6be4a5cb2 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
245{ 245{
246 switch (gpio_type) { 246 switch (gpio_type) {
247 case PXA3XX_GPIO: 247 case PXA3XX_GPIO:
248 case MMP2_GPIO:
248 return false; 249 return false;
249 250
250 default: 251 default:
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 1b79ebcfce3e..541fa6ac399d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
253 struct vf610_gpio_port *port; 253 struct vf610_gpio_port *port;
254 struct resource *iores; 254 struct resource *iores;
255 struct gpio_chip *gc; 255 struct gpio_chip *gc;
256 int i;
256 int ret; 257 int ret;
257 258
258 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); 259 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
319 if (ret < 0) 320 if (ret < 0)
320 return ret; 321 return ret;
321 322
323 /* Mask all GPIO interrupts */
324 for (i = 0; i < gc->ngpio; i++)
325 vf610_gpio_writel(0, port->base + PORT_PCR(i));
326
322 /* Clear the interrupt status register for all GPIO's */ 327 /* Clear the interrupt status register for all GPIO's */
323 vf610_gpio_writel(~0, port->base + PORT_ISFR); 328 vf610_gpio_writel(~0, port->base + PORT_ISFR);
324 329
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 48534bda73d3..259cf6ab969b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -357,8 +357,6 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
357 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); 357 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
358 358
359 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 359 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
360 struct gpio_desc *desc;
361
362 if (event->irq_requested) { 360 if (event->irq_requested) {
363 if (event->irq_is_wake) 361 if (event->irq_is_wake)
364 disable_irq_wake(event->irq); 362 disable_irq_wake(event->irq);
@@ -366,11 +364,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
366 free_irq(event->irq, event); 364 free_irq(event->irq, event);
367 } 365 }
368 366
369 desc = event->desc;
370 if (WARN_ON(IS_ERR(desc)))
371 continue;
372 gpiochip_unlock_as_irq(chip, event->pin); 367 gpiochip_unlock_as_irq(chip, event->pin);
373 gpiochip_free_own_desc(desc); 368 gpiochip_free_own_desc(event->desc);
374 list_del(&event->node); 369 list_del(&event->node);
375 kfree(event); 370 kfree(event);
376 } 371 }
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 1651d7f0a303..d1adfdf50fb3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
828 /* Do not leak kernel stack to userspace */ 828 /* Do not leak kernel stack to userspace */
829 memset(&ge, 0, sizeof(ge)); 829 memset(&ge, 0, sizeof(ge));
830 830
831 ge.timestamp = le->timestamp; 831 /*
832 * We may be running from a nested threaded interrupt in which case
833 * we didn't get the timestamp from lineevent_irq_handler().
834 */
835 if (!le->timestamp)
836 ge.timestamp = ktime_get_real_ns();
837 else
838 ge.timestamp = le->timestamp;
832 839
833 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE 840 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
834 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { 841 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a028661d9e20..92b11de19581 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
576 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, 576 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, 577 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, 578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
579 { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
579 { 0, 0, 0, 0, 0 }, 580 { 0, 0, 0, 0, 0 },
580}; 581};
581 582
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8a078f4ae73d..7ff3a28fc903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1701,8 +1701,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1701 amdgpu_xgmi_add_device(adev); 1701 amdgpu_xgmi_add_device(adev);
1702 amdgpu_amdkfd_device_init(adev); 1702 amdgpu_amdkfd_device_init(adev);
1703 1703
1704 if (amdgpu_sriov_vf(adev)) 1704 if (amdgpu_sriov_vf(adev)) {
1705 amdgpu_virt_init_data_exchange(adev);
1705 amdgpu_virt_release_full_gpu(adev, true); 1706 amdgpu_virt_release_full_gpu(adev, true);
1707 }
1706 1708
1707 return 0; 1709 return 0;
1708} 1710}
@@ -2632,9 +2634,6 @@ fence_driver_init:
2632 goto failed; 2634 goto failed;
2633 } 2635 }
2634 2636
2635 if (amdgpu_sriov_vf(adev))
2636 amdgpu_virt_init_data_exchange(adev);
2637
2638 amdgpu_fbdev_init(adev); 2637 amdgpu_fbdev_init(adev);
2639 2638
2640 r = amdgpu_pm_sysfs_init(adev); 2639 r = amdgpu_pm_sysfs_init(adev);
@@ -2798,7 +2797,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2798 struct drm_framebuffer *fb = crtc->primary->fb; 2797 struct drm_framebuffer *fb = crtc->primary->fb;
2799 struct amdgpu_bo *robj; 2798 struct amdgpu_bo *robj;
2800 2799
2801 if (amdgpu_crtc->cursor_bo) { 2800 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
2802 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2801 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2803 r = amdgpu_bo_reserve(aobj, true); 2802 r = amdgpu_bo_reserve(aobj, true);
2804 if (r == 0) { 2803 if (r == 0) {
@@ -2906,7 +2905,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2906 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2905 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2907 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2906 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2908 2907
2909 if (amdgpu_crtc->cursor_bo) { 2908 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
2910 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2909 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2911 r = amdgpu_bo_reserve(aobj, true); 2910 r = amdgpu_bo_reserve(aobj, true);
2912 if (r == 0) { 2911 if (r == 0) {
@@ -3226,6 +3225,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3226 r = amdgpu_ib_ring_tests(adev); 3225 r = amdgpu_ib_ring_tests(adev);
3227 3226
3228error: 3227error:
3228 amdgpu_virt_init_data_exchange(adev);
3229 amdgpu_virt_release_full_gpu(adev, true); 3229 amdgpu_virt_release_full_gpu(adev, true);
3230 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 3230 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3231 atomic_inc(&adev->vram_lost_counter); 3231 atomic_inc(&adev->vram_lost_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 15ce7e681d67..b083b219b1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
188 goto cleanup; 188 goto cleanup;
189 } 189 }
190 190
191 r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev)); 191 if (!adev->enable_virtual_display) {
192 if (unlikely(r != 0)) { 192 r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
193 DRM_ERROR("failed to pin new abo buffer before flip\n"); 193 if (unlikely(r != 0)) {
194 goto unreserve; 194 DRM_ERROR("failed to pin new abo buffer before flip\n");
195 goto unreserve;
196 }
195 } 197 }
196 198
197 r = amdgpu_ttm_alloc_gart(&new_abo->tbo); 199 r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
@@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
211 amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags); 213 amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
212 amdgpu_bo_unreserve(new_abo); 214 amdgpu_bo_unreserve(new_abo);
213 215
214 work->base = amdgpu_bo_gpu_offset(new_abo); 216 if (!adev->enable_virtual_display)
217 work->base = amdgpu_bo_gpu_offset(new_abo);
215 work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + 218 work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
216 amdgpu_get_vblank_counter_kms(dev, work->crtc_id); 219 amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
217 220
@@ -242,9 +245,10 @@ pflip_cleanup:
242 goto cleanup; 245 goto cleanup;
243 } 246 }
244unpin: 247unpin:
245 if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) { 248 if (!adev->enable_virtual_display)
246 DRM_ERROR("failed to unpin new abo in error path\n"); 249 if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
247 } 250 DRM_ERROR("failed to unpin new abo in error path\n");
251
248unreserve: 252unreserve:
249 amdgpu_bo_unreserve(new_abo); 253 amdgpu_bo_unreserve(new_abo);
250 254
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bc62bf41b7e9..5dc349173e4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
212 } 212 }
213 213
214 if (amdgpu_device_is_px(dev)) { 214 if (amdgpu_device_is_px(dev)) {
215 dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
215 pm_runtime_use_autosuspend(dev->dev); 216 pm_runtime_use_autosuspend(dev->dev);
216 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 217 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
217 pm_runtime_set_active(dev->dev); 218 pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 1f61ed95727c..0ed41a9d2d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1686 effective_mode &= ~S_IWUSR; 1686 effective_mode &= ~S_IWUSR;
1687 1687
1688 if ((adev->flags & AMD_IS_APU) && 1688 if ((adev->flags & AMD_IS_APU) &&
1689 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 1689 (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
1690 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1690 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 1691 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
1691 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 1692 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
1692 return 0; 1693 return 0;
@@ -2008,6 +2009,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
2008 2009
2009int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 2010int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2010{ 2011{
2012 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2011 int ret; 2013 int ret;
2012 2014
2013 if (adev->pm.sysfs_initialized) 2015 if (adev->pm.sysfs_initialized)
@@ -2091,12 +2093,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2091 "pp_power_profile_mode\n"); 2093 "pp_power_profile_mode\n");
2092 return ret; 2094 return ret;
2093 } 2095 }
2094 ret = device_create_file(adev->dev, 2096 if (hwmgr->od_enabled) {
2095 &dev_attr_pp_od_clk_voltage); 2097 ret = device_create_file(adev->dev,
2096 if (ret) { 2098 &dev_attr_pp_od_clk_voltage);
2097 DRM_ERROR("failed to create device file " 2099 if (ret) {
2098 "pp_od_clk_voltage\n"); 2100 DRM_ERROR("failed to create device file "
2099 return ret; 2101 "pp_od_clk_voltage\n");
2102 return ret;
2103 }
2100 } 2104 }
2101 ret = device_create_file(adev->dev, 2105 ret = device_create_file(adev->dev,
2102 &dev_attr_gpu_busy_percent); 2106 &dev_attr_gpu_busy_percent);
@@ -2118,6 +2122,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2118 2122
2119void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 2123void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2120{ 2124{
2125 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2126
2121 if (adev->pm.dpm_enabled == 0) 2127 if (adev->pm.dpm_enabled == 0)
2122 return; 2128 return;
2123 2129
@@ -2138,8 +2144,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2138 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 2144 device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
2139 device_remove_file(adev->dev, 2145 device_remove_file(adev->dev,
2140 &dev_attr_pp_power_profile_mode); 2146 &dev_attr_pp_power_profile_mode);
2141 device_remove_file(adev->dev, 2147 if (hwmgr->od_enabled)
2142 &dev_attr_pp_od_clk_voltage); 2148 device_remove_file(adev->dev,
2149 &dev_attr_pp_od_clk_voltage);
2143 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); 2150 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
2144} 2151}
2145 2152
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 71913a18d142..a38e0fb4a6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,6 +38,7 @@
38#include "amdgpu_gem.h" 38#include "amdgpu_gem.h"
39#include <drm/amdgpu_drm.h> 39#include <drm/amdgpu_drm.h>
40#include <linux/dma-buf.h> 40#include <linux/dma-buf.h>
41#include <linux/dma-fence-array.h>
41 42
42/** 43/**
43 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table 44 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
187 return ERR_PTR(ret); 188 return ERR_PTR(ret);
188} 189}
189 190
191static int
192__reservation_object_make_exclusive(struct reservation_object *obj)
193{
194 struct dma_fence **fences;
195 unsigned int count;
196 int r;
197
198 if (!reservation_object_get_list(obj)) /* no shared fences to convert */
199 return 0;
200
201 r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
202 if (r)
203 return r;
204
205 if (count == 0) {
206 /* Now that was unexpected. */
207 } else if (count == 1) {
208 reservation_object_add_excl_fence(obj, fences[0]);
209 dma_fence_put(fences[0]);
210 kfree(fences);
211 } else {
212 struct dma_fence_array *array;
213
214 array = dma_fence_array_create(count, fences,
215 dma_fence_context_alloc(1), 0,
216 false);
217 if (!array)
218 goto err_fences_put;
219
220 reservation_object_add_excl_fence(obj, &array->base);
221 dma_fence_put(&array->base);
222 }
223
224 return 0;
225
226err_fences_put:
227 while (count--)
228 dma_fence_put(fences[count]);
229 kfree(fences);
230 return -ENOMEM;
231}
232
190/** 233/**
191 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation 234 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
192 * @dma_buf: Shared DMA buffer 235 * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
218 261
219 if (attach->dev->driver != adev->dev->driver) { 262 if (attach->dev->driver != adev->dev->driver) {
220 /* 263 /*
221 * Wait for all shared fences to complete before we switch to future 264 * We only create shared fences for internal use, but importers
222 * use of exclusive fence on this prime shared bo. 265 * of the dmabuf rely on exclusive fences for implicitly
266 * tracking write hazards. As any of the current fences may
267 * correspond to a write, we need to convert all existing
268 * fences on the reservation object into a single exclusive
269 * fence.
223 */ 270 */
224 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, 271 r = __reservation_object_make_exclusive(bo->tbo.resv);
225 true, false, 272 if (r)
226 MAX_SCHEDULE_TIMEOUT);
227 if (unlikely(r < 0)) {
228 DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
229 goto error_unreserve; 273 goto error_unreserve;
230 }
231 } 274 }
232 275
233 /* pin buffer into GTT */ 276 /* pin buffer into GTT */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8fab0d637ee5..3a9b48b227ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle)
90 adev->psp.sos_fw = NULL; 90 adev->psp.sos_fw = NULL;
91 release_firmware(adev->psp.asd_fw); 91 release_firmware(adev->psp.asd_fw);
92 adev->psp.asd_fw = NULL; 92 adev->psp.asd_fw = NULL;
93 release_firmware(adev->psp.ta_fw); 93 if (adev->psp.ta_fw) {
94 adev->psp.ta_fw = NULL; 94 release_firmware(adev->psp.ta_fw);
95 adev->psp.ta_fw = NULL;
96 }
95 return 0; 97 return 0;
96} 98}
97 99
@@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
435 struct ta_xgmi_shared_memory *xgmi_cmd; 437 struct ta_xgmi_shared_memory *xgmi_cmd;
436 int ret; 438 int ret;
437 439
440 if (!psp->adev->psp.ta_fw)
441 return -ENOENT;
442
438 if (!psp->xgmi_context.initialized) { 443 if (!psp->xgmi_context.initialized) {
439 ret = psp_xgmi_init_shared_buf(psp); 444 ret = psp_xgmi_init_shared_buf(psp);
440 if (ret) 445 if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e73d152659a2..698bcb8ce61d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -638,12 +638,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
638 struct ttm_bo_global *glob = adev->mman.bdev.glob; 638 struct ttm_bo_global *glob = adev->mman.bdev.glob;
639 struct amdgpu_vm_bo_base *bo_base; 639 struct amdgpu_vm_bo_base *bo_base;
640 640
641#if 0
641 if (vm->bulk_moveable) { 642 if (vm->bulk_moveable) {
642 spin_lock(&glob->lru_lock); 643 spin_lock(&glob->lru_lock);
643 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); 644 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
644 spin_unlock(&glob->lru_lock); 645 spin_unlock(&glob->lru_lock);
645 return; 646 return;
646 } 647 }
648#endif
647 649
648 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); 650 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
649 651
@@ -847,9 +849,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
847 bp->size = amdgpu_vm_bo_size(adev, level); 849 bp->size = amdgpu_vm_bo_size(adev, level);
848 bp->byte_align = AMDGPU_GPU_PAGE_SIZE; 850 bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
849 bp->domain = AMDGPU_GEM_DOMAIN_VRAM; 851 bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
850 if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
851 adev->flags & AMD_IS_APU)
852 bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
853 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); 852 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
854 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 853 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
855 AMDGPU_GEM_CREATE_CPU_GTT_USWC; 854 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -3366,14 +3365,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3366 struct amdgpu_task_info *task_info) 3365 struct amdgpu_task_info *task_info)
3367{ 3366{
3368 struct amdgpu_vm *vm; 3367 struct amdgpu_vm *vm;
3368 unsigned long flags;
3369 3369
3370 spin_lock(&adev->vm_manager.pasid_lock); 3370 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3371 3371
3372 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3372 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3373 if (vm) 3373 if (vm)
3374 *task_info = vm->task_info; 3374 *task_info = vm->task_info;
3375 3375
3376 spin_unlock(&adev->vm_manager.pasid_lock); 3376 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3377} 3377}
3378 3378
3379/** 3379/**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index fdace004544d..e4cc1d48eaab 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
167 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 167 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
168 168
169 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 169 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
170 if (crtc->primary->fb) {
171 int r;
172 struct amdgpu_bo *abo;
173
174 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
175 r = amdgpu_bo_reserve(abo, true);
176 if (unlikely(r))
177 DRM_ERROR("failed to reserve abo before unpin\n");
178 else {
179 amdgpu_bo_unpin(abo);
180 amdgpu_bo_unreserve(abo);
181 }
182 }
183 170
184 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 171 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
185 amdgpu_crtc->encoder = NULL; 172 amdgpu_crtc->encoder = NULL;
@@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
692 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 679 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
693 680
694 drm_crtc_vblank_put(&amdgpu_crtc->base); 681 drm_crtc_vblank_put(&amdgpu_crtc->base);
695 schedule_work(&works->unpin_work); 682 amdgpu_bo_unref(&works->old_abo);
683 kfree(works->shared);
684 kfree(works);
696 685
697 return 0; 686 return 0;
698} 687}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 381f593b0cda..57cb3a51bda7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4233,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4233 u32 tmp; 4233 u32 tmp;
4234 u32 rb_bufsz; 4234 u32 rb_bufsz;
4235 u64 rb_addr, rptr_addr, wptr_gpu_addr; 4235 u64 rb_addr, rptr_addr, wptr_gpu_addr;
4236 int r;
4237 4236
4238 /* Set the write pointer delay */ 4237 /* Set the write pointer delay */
4239 WREG32(mmCP_RB_WPTR_DELAY, 0); 4238 WREG32(mmCP_RB_WPTR_DELAY, 0);
@@ -4278,9 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4278 amdgpu_ring_clear_ring(ring); 4277 amdgpu_ring_clear_ring(ring);
4279 gfx_v8_0_cp_gfx_start(adev); 4278 gfx_v8_0_cp_gfx_start(adev);
4280 ring->sched.ready = true; 4279 ring->sched.ready = true;
4281 r = amdgpu_ring_test_helper(ring);
4282 4280
4283 return r; 4281 return 0;
4284} 4282}
4285 4283
4286static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 4284static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4369,10 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
4369 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 4367 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
4370 } 4368 }
4371 4369
4372 r = amdgpu_ring_test_helper(kiq_ring); 4370 amdgpu_ring_commit(kiq_ring);
4373 if (r) 4371
4374 DRM_ERROR("KCQ enable failed\n"); 4372 return 0;
4375 return r;
4376} 4373}
4377 4374
4378static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) 4375static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
@@ -4709,16 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4709 if (r) 4706 if (r)
4710 goto done; 4707 goto done;
4711 4708
4712 /* Test KCQs - reversing the order of rings seems to fix ring test failure 4709done:
4713 * after GPU reset 4710 return r;
4714 */ 4711}
4715 for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { 4712
4713static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
4714{
4715 int r, i;
4716 struct amdgpu_ring *ring;
4717
4718 /* collect all the ring_tests here, gfx, kiq, compute */
4719 ring = &adev->gfx.gfx_ring[0];
4720 r = amdgpu_ring_test_helper(ring);
4721 if (r)
4722 return r;
4723
4724 ring = &adev->gfx.kiq.ring;
4725 r = amdgpu_ring_test_helper(ring);
4726 if (r)
4727 return r;
4728
4729 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4716 ring = &adev->gfx.compute_ring[i]; 4730 ring = &adev->gfx.compute_ring[i];
4717 r = amdgpu_ring_test_helper(ring); 4731 amdgpu_ring_test_helper(ring);
4718 } 4732 }
4719 4733
4720done: 4734 return 0;
4721 return r;
4722} 4735}
4723 4736
4724static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) 4737static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
@@ -4739,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
4739 r = gfx_v8_0_kcq_resume(adev); 4752 r = gfx_v8_0_kcq_resume(adev);
4740 if (r) 4753 if (r)
4741 return r; 4754 return r;
4755
4756 r = gfx_v8_0_cp_test_all_rings(adev);
4757 if (r)
4758 return r;
4759
4742 gfx_v8_0_enable_gui_idle_interrupt(adev, true); 4760 gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4743 4761
4744 return 0; 4762 return 0;
@@ -5086,6 +5104,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
5086 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) 5104 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5087 gfx_v8_0_cp_gfx_resume(adev); 5105 gfx_v8_0_cp_gfx_resume(adev);
5088 5106
5107 gfx_v8_0_cp_test_all_rings(adev);
5108
5089 adev->gfx.rlc.funcs->start(adev); 5109 adev->gfx.rlc.funcs->start(adev);
5090 5110
5091 return 0; 5111 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7556716038d3..fbca0494f871 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -113,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), 113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), 114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), 115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
116 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) 116 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
117 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
118 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
119 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
117}; 120};
118 121
119static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = 122static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -135,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), 138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), 139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
137 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800), 140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), 141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
142}; 142};
143 143
144static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = 144static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -3587,6 +3587,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
3587{ 3587{
3588 uint32_t data, def; 3588 uint32_t data, def;
3589 3589
3590 amdgpu_gfx_rlc_enter_safe_mode(adev);
3591
3590 /* It is disabled by HW by default */ 3592 /* It is disabled by HW by default */
3591 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 3593 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3592 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 3594 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -3651,6 +3653,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
3651 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3653 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3652 } 3654 }
3653 } 3655 }
3656
3657 amdgpu_gfx_rlc_exit_safe_mode(adev);
3654} 3658}
3655 3659
3656static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, 3660static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8cbb4655896a..b11a1c17a7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
174 return r; 174 return r;
175 } 175 }
176 /* Retrieve checksum from mailbox2 */ 176 /* Retrieve checksum from mailbox2 */
177 if (req == IDH_REQ_GPU_INIT_ACCESS) { 177 if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
178 adev->virt.fw_reserve.checksum_key = 178 adev->virt.fw_reserve.checksum_key =
179 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 179 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
180 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2)); 180 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4cd31a276dcd..186db182f924 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
93static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, 93static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
94 bool enable) 94 bool enable)
95{ 95{
96 u32 tmp = 0;
96 97
98 if (enable) {
99 tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
100 REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
101 REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
102
103 WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
104 lower_32_bits(adev->doorbell.base));
105 WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
106 upper_32_bits(adev->doorbell.base));
107 }
108
109 WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
97} 110}
98 111
99static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, 112static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0c6e7f9b143f..189fcb004579 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
152 152
153 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 153 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
154 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); 154 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
155 if (err) 155 if (err) {
156 goto out2; 156 release_firmware(adev->psp.ta_fw);
157 157 adev->psp.ta_fw = NULL;
158 err = amdgpu_ucode_validate(adev->psp.ta_fw); 158 dev_info(adev->dev,
159 if (err) 159 "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
160 goto out2; 160 } else {
161 161 err = amdgpu_ucode_validate(adev->psp.ta_fw);
162 ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; 162 if (err)
163 adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); 163 goto out2;
164 adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); 164
165 adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + 165 ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
166 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 166 adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
167 adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
168 adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
169 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
170 }
167 171
168 return 0; 172 return 0;
169 173
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index fd0bfe140ee0..aa2f71cc1eba 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -78,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
78 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 78 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
79 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), 79 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
80 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000), 80 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), 81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), 82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
84 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -96,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
96static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { 95static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
97 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), 96 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
98 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), 97 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
98 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
99 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), 99 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
100 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002) 100 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
101}; 101};
@@ -103,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
103static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { 103static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
104 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), 104 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
105 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), 105 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
106 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
106 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), 107 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
107 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) 108 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
108}; 109};
@@ -127,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
127 128
128static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = 129static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
129{ 130{
130 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), 131 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
131 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), 132 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
132 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), 133 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
133 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), 134 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -157,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
157}; 158};
158 159
159static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { 160static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
160 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), 161 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
161 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), 162 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
162 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), 163 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
163 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), 164 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8849b74078d6..9b639974c70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle)
729 case CHIP_RAVEN: 729 case CHIP_RAVEN:
730 adev->asic_funcs = &soc15_asic_funcs; 730 adev->asic_funcs = &soc15_asic_funcs;
731 if (adev->rev_id >= 0x8) 731 if (adev->rev_id >= 0x8)
732 adev->external_rev_id = adev->rev_id + 0x81; 732 adev->external_rev_id = adev->rev_id + 0x79;
733 else if (adev->pdev->device == 0x15d8) 733 else if (adev->pdev->device == 0x15d8)
734 adev->external_rev_id = adev->rev_id + 0x41; 734 adev->external_rev_id = adev->rev_id + 0x41;
735 else if (adev->rev_id == 1)
736 adev->external_rev_id = adev->rev_id + 0x20;
735 else 737 else
736 adev->external_rev_id = 0x1; 738 adev->external_rev_id = adev->rev_id + 0x01;
737 739
738 if (adev->rev_id >= 0x8) { 740 if (adev->rev_id >= 0x8) {
739 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | 741 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index fbf0ee5201c3..c3613604a4f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,8 +4,8 @@
4 4
5config HSA_AMD 5config HSA_AMD
6 bool "HSA kernel driver for AMD GPU devices" 6 bool "HSA kernel driver for AMD GPU devices"
7 depends on DRM_AMDGPU && X86_64 7 depends on DRM_AMDGPU && (X86_64 || ARM64)
8 imply AMD_IOMMU_V2 8 imply AMD_IOMMU_V2 if X86_64
9 select MMU_NOTIFIER 9 select MMU_NOTIFIER
10 help 10 help
11 Enable this if you want to use HSA features on AMD GPU devices. 11 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index b7bc7d7d048f..2e7c44955f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
863 return 0; 863 return 0;
864} 864}
865 865
866#ifdef CONFIG_X86_64
866static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, 867static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
867 uint32_t *num_entries, 868 uint32_t *num_entries,
868 struct crat_subtype_iolink *sub_type_hdr) 869 struct crat_subtype_iolink *sub_type_hdr)
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
905 906
906 return 0; 907 return 0;
907} 908}
909#endif
908 910
909/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU 911/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
910 * 912 *
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
920 struct crat_subtype_generic *sub_type_hdr; 922 struct crat_subtype_generic *sub_type_hdr;
921 int avail_size = *size; 923 int avail_size = *size;
922 int numa_node_id; 924 int numa_node_id;
925#ifdef CONFIG_X86_64
923 uint32_t entries = 0; 926 uint32_t entries = 0;
927#endif
924 int ret = 0; 928 int ret = 0;
925 929
926 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) 930 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
982 sub_type_hdr->length); 986 sub_type_hdr->length);
983 987
984 /* Fill in Subtype: IO Link */ 988 /* Fill in Subtype: IO Link */
989#ifdef CONFIG_X86_64
985 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, 990 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
986 &entries, 991 &entries,
987 (struct crat_subtype_iolink *)sub_type_hdr); 992 (struct crat_subtype_iolink *)sub_type_hdr);
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
992 997
993 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + 998 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
994 sub_type_hdr->length * entries); 999 sub_type_hdr->length * entries);
1000#else
1001 pr_info("IO link not available for non x86 platforms\n");
1002#endif
995 1003
996 crat_table->num_domains++; 1004 crat_table->num_domains++;
997 } 1005 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5f5b2acedbac..09da91644f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
1093 * the GPU device is not already present in the topology device 1093 * the GPU device is not already present in the topology device
1094 * list then return NULL. This means a new topology device has to 1094 * list then return NULL. This means a new topology device has to
1095 * be created for this GPU. 1095 * be created for this GPU.
1096 * TODO: Rather than assiging @gpu to first topology device withtout
1097 * gpu attached, it will better to have more stringent check.
1098 */ 1096 */
1099static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) 1097static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
1100{ 1098{
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
1102 struct kfd_topology_device *out_dev = NULL; 1100 struct kfd_topology_device *out_dev = NULL;
1103 1101
1104 down_write(&topology_lock); 1102 down_write(&topology_lock);
1105 list_for_each_entry(dev, &topology_device_list, list) 1103 list_for_each_entry(dev, &topology_device_list, list) {
1104 /* Discrete GPUs need their own topology device list
1105 * entries. Don't assign them to CPU/APU nodes.
1106 */
1107 if (!gpu->device_info->needs_iommu_device &&
1108 dev->node_props.cpu_cores_count)
1109 continue;
1110
1106 if (!dev->gpu && (dev->node_props.simd_count > 0)) { 1111 if (!dev->gpu && (dev->node_props.simd_count > 0)) {
1107 dev->gpu = gpu; 1112 dev->gpu = gpu;
1108 out_dev = dev; 1113 out_dev = dev;
1109 break; 1114 break;
1110 } 1115 }
1116 }
1111 up_write(&topology_lock); 1117 up_write(&topology_lock);
1112 return out_dev; 1118 return out_dev;
1113} 1119}
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
1392 1398
1393static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) 1399static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
1394{ 1400{
1395 const struct cpuinfo_x86 *cpuinfo;
1396 int first_cpu_of_numa_node; 1401 int first_cpu_of_numa_node;
1397 1402
1398 if (!cpumask || cpumask == cpu_none_mask) 1403 if (!cpumask || cpumask == cpu_none_mask)
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
1400 first_cpu_of_numa_node = cpumask_first(cpumask); 1405 first_cpu_of_numa_node = cpumask_first(cpumask);
1401 if (first_cpu_of_numa_node >= nr_cpu_ids) 1406 if (first_cpu_of_numa_node >= nr_cpu_ids)
1402 return -1; 1407 return -1;
1403 cpuinfo = &cpu_data(first_cpu_of_numa_node); 1408#ifdef CONFIG_X86_64
1404 1409 return cpu_data(first_cpu_of_numa_node).apicid;
1405 return cpuinfo->apicid; 1410#else
1411 return first_cpu_of_numa_node;
1412#endif
1406} 1413}
1407 1414
1408/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor 1415/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a9a28dbc3e24..5296b8f3e0ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -699,22 +699,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
699{ 699{
700 struct amdgpu_dm_connector *aconnector; 700 struct amdgpu_dm_connector *aconnector;
701 struct drm_connector *connector; 701 struct drm_connector *connector;
702 struct drm_dp_mst_topology_mgr *mgr;
703 int ret;
704 bool need_hotplug = false;
702 705
703 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 706 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
704 707
705 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 708 list_for_each_entry(connector, &dev->mode_config.connector_list,
706 aconnector = to_amdgpu_dm_connector(connector); 709 head) {
707 if (aconnector->dc_link->type == dc_connection_mst_branch && 710 aconnector = to_amdgpu_dm_connector(connector);
708 !aconnector->mst_port) { 711 if (aconnector->dc_link->type != dc_connection_mst_branch ||
712 aconnector->mst_port)
713 continue;
714
715 mgr = &aconnector->mst_mgr;
709 716
710 if (suspend) 717 if (suspend) {
711 drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); 718 drm_dp_mst_topology_mgr_suspend(mgr);
712 else 719 } else {
713 drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); 720 ret = drm_dp_mst_topology_mgr_resume(mgr);
714 } 721 if (ret < 0) {
722 drm_dp_mst_topology_mgr_set_mst(mgr, false);
723 need_hotplug = true;
724 }
725 }
715 } 726 }
716 727
717 drm_modeset_unlock(&dev->mode_config.connection_mutex); 728 drm_modeset_unlock(&dev->mode_config.connection_mutex);
729
730 if (need_hotplug)
731 drm_kms_helper_hotplug_event(dev);
718} 732}
719 733
720/** 734/**
@@ -772,12 +786,13 @@ static int dm_suspend(void *handle)
772 struct amdgpu_display_manager *dm = &adev->dm; 786 struct amdgpu_display_manager *dm = &adev->dm;
773 int ret = 0; 787 int ret = 0;
774 788
789 WARN_ON(adev->dm.cached_state);
790 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
791
775 s3_handle_mst(adev->ddev, true); 792 s3_handle_mst(adev->ddev, true);
776 793
777 amdgpu_dm_irq_suspend(adev); 794 amdgpu_dm_irq_suspend(adev);
778 795
779 WARN_ON(adev->dm.cached_state);
780 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
781 796
782 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 797 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
783 798
@@ -898,7 +913,6 @@ static int dm_resume(void *handle)
898 struct drm_plane_state *new_plane_state; 913 struct drm_plane_state *new_plane_state;
899 struct dm_plane_state *dm_new_plane_state; 914 struct dm_plane_state *dm_new_plane_state;
900 enum dc_connection_type new_connection_type = dc_connection_none; 915 enum dc_connection_type new_connection_type = dc_connection_none;
901 int ret;
902 int i; 916 int i;
903 917
904 /* power on hardware */ 918 /* power on hardware */
@@ -971,13 +985,13 @@ static int dm_resume(void *handle)
971 } 985 }
972 } 986 }
973 987
974 ret = drm_atomic_helper_resume(ddev, dm->cached_state); 988 drm_atomic_helper_resume(ddev, dm->cached_state);
975 989
976 dm->cached_state = NULL; 990 dm->cached_state = NULL;
977 991
978 amdgpu_dm_irq_resume_late(adev); 992 amdgpu_dm_irq_resume_late(adev);
979 993
980 return ret; 994 return 0;
981} 995}
982 996
983/** 997/**
@@ -1759,7 +1773,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1759 + caps.min_input_signal * 0x101; 1773 + caps.min_input_signal * 0x101;
1760 1774
1761 if (dc_link_set_backlight_level(dm->backlight_link, 1775 if (dc_link_set_backlight_level(dm->backlight_link,
1762 brightness, 0, 0)) 1776 brightness, 0))
1763 return 0; 1777 return 0;
1764 else 1778 else
1765 return 1; 1779 return 1;
@@ -4069,7 +4083,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4069 } 4083 }
4070 4084
4071 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 4085 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4072 connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 4086 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4087 connector_type == DRM_MODE_CONNECTOR_eDP) {
4073 drm_connector_attach_vrr_capable_property( 4088 drm_connector_attach_vrr_capable_property(
4074 &aconnector->base); 4089 &aconnector->base);
4075 } 4090 }
@@ -5920,7 +5935,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
5920 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 5935 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5921 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 5936 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5922 !new_crtc_state->color_mgmt_changed && 5937 !new_crtc_state->color_mgmt_changed &&
5923 !new_crtc_state->vrr_enabled) 5938 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
5924 continue; 5939 continue;
5925 5940
5926 if (!new_crtc_state->enable) 5941 if (!new_crtc_state->enable)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9a7ac58eb18e..ddd75a4d8ba5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
671 return bytes_from_user; 671 return bytes_from_user;
672} 672}
673 673
674/*
675 * Returns the min and max vrr vfreq through the connector's debugfs file.
676 * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
677 */
678static int vrr_range_show(struct seq_file *m, void *data)
679{
680 struct drm_connector *connector = m->private;
681 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
682
683 if (connector->status != connector_status_connected)
684 return -ENODEV;
685
686 seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq);
687 seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq);
688
689 return 0;
690}
691DEFINE_SHOW_ATTRIBUTE(vrr_range);
692
674static const struct file_operations dp_link_settings_debugfs_fops = { 693static const struct file_operations dp_link_settings_debugfs_fops = {
675 .owner = THIS_MODULE, 694 .owner = THIS_MODULE,
676 .read = dp_link_settings_read, 695 .read = dp_link_settings_read,
@@ -697,7 +716,8 @@ static const struct {
697} dp_debugfs_entries[] = { 716} dp_debugfs_entries[] = {
698 {"link_settings", &dp_link_settings_debugfs_fops}, 717 {"link_settings", &dp_link_settings_debugfs_fops},
699 {"phy_settings", &dp_phy_settings_debugfs_fop}, 718 {"phy_settings", &dp_phy_settings_debugfs_fop},
700 {"test_pattern", &dp_phy_test_pattern_fops} 719 {"test_pattern", &dp_phy_test_pattern_fops},
720 {"vrr_range", &vrr_range_fops}
701}; 721};
702 722
703int connector_debugfs_init(struct amdgpu_dm_connector *connector) 723int connector_debugfs_init(struct amdgpu_dm_connector *connector)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 52deacf39841..b0265dbebd4c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
2190 2190
2191bool dc_link_set_backlight_level(const struct dc_link *link, 2191bool dc_link_set_backlight_level(const struct dc_link *link,
2192 uint32_t backlight_pwm_u16_16, 2192 uint32_t backlight_pwm_u16_16,
2193 uint32_t frame_ramp, 2193 uint32_t frame_ramp)
2194 const struct dc_stream_state *stream)
2195{ 2194{
2196 struct dc *core_dc = link->ctx->dc; 2195 struct dc *core_dc = link->ctx->dc;
2197 struct abm *abm = core_dc->res_pool->abm; 2196 struct abm *abm = core_dc->res_pool->abm;
@@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
2206 (abm->funcs->set_backlight_level_pwm == NULL)) 2205 (abm->funcs->set_backlight_level_pwm == NULL))
2207 return false; 2206 return false;
2208 2207
2209 if (stream)
2210 ((struct dc_stream_state *)stream)->bl_pwm_level =
2211 backlight_pwm_u16_16;
2212
2213 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); 2208 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
2214 2209
2215 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", 2210 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@@ -2637,11 +2632,6 @@ void core_link_enable_stream(
2637 2632
2638 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 2633 if (dc_is_dp_signal(pipe_ctx->stream->signal))
2639 enable_stream_features(pipe_ctx); 2634 enable_stream_features(pipe_ctx);
2640
2641 dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
2642 pipe_ctx->stream->bl_pwm_level,
2643 0,
2644 pipe_ctx->stream);
2645 } 2635 }
2646 2636
2647} 2637}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 29f19d57ff7a..b2243e0dad1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
146 */ 146 */
147bool dc_link_set_backlight_level(const struct dc_link *dc_link, 147bool dc_link_set_backlight_level(const struct dc_link *dc_link,
148 uint32_t backlight_pwm_u16_16, 148 uint32_t backlight_pwm_u16_16,
149 uint32_t frame_ramp, 149 uint32_t frame_ramp);
150 const struct dc_stream_state *stream);
151 150
152int dc_link_get_backlight_level(const struct dc_link *dc_link); 151int dc_link_get_backlight_level(const struct dc_link *dc_link);
153 152
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index be34d638e15d..d70c9e1cda3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -91,7 +91,6 @@ struct dc_stream_state {
91 91
92 /* DMCU info */ 92 /* DMCU info */
93 unsigned int abm_level; 93 unsigned int abm_level;
94 unsigned int bl_pwm_level;
95 94
96 /* from core_stream struct */ 95 /* from core_stream struct */
97 struct dc_context *ctx; 96 struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index afd287f08bc9..7a72ee46f14b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements(
591 dc, 591 dc,
592 context->bw.dce.sclk_khz); 592 context->bw.dce.sclk_khz);
593 593
594 pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; 594 /*
595 * As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
596 * This is not required for less than 5 displays,
597 * thus don't request decfclk in dc to avoid impact
598 * on power saving.
599 *
600 */
601 pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
602 pp_display_cfg->min_engine_clock_khz : 0;
595 603
596 pp_display_cfg->min_engine_clock_deep_sleep_khz 604 pp_display_cfg->min_engine_clock_deep_sleep_khz
597 = context->bw.dce.sclk_deep_sleep_khz; 605 = context->bw.dce.sclk_deep_sleep_khz;
@@ -654,6 +662,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
654{ 662{
655 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); 663 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
656 struct dm_pp_power_level_change_request level_change_req; 664 struct dm_pp_power_level_change_request level_change_req;
665 int patched_disp_clk = context->bw.dce.dispclk_khz;
666
667 /*TODO: W/A for dal3 linux, investigate why this works */
668 if (!clk_mgr_dce->dfs_bypass_active)
669 patched_disp_clk = patched_disp_clk * 115 / 100;
657 670
658 level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); 671 level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
659 /* get max clock state from PPLIB */ 672 /* get max clock state from PPLIB */
@@ -663,9 +676,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
663 clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; 676 clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
664 } 677 }
665 678
666 if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { 679 if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
667 context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); 680 context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
668 clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; 681 clk_mgr->clks.dispclk_khz = patched_disp_clk;
669 } 682 }
670 dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); 683 dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
671} 684}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index acd418515346..a6b80fdaa666 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -37,6 +37,10 @@ void dce100_prepare_bandwidth(
37 struct dc *dc, 37 struct dc *dc,
38 struct dc_state *context); 38 struct dc_state *context);
39 39
40void dce100_optimize_bandwidth(
41 struct dc *dc,
42 struct dc_state *context);
43
40bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, 44bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
41 struct dc_bios *dcb, 45 struct dc_bios *dcb,
42 enum pipe_gating_control power_gating); 46 enum pipe_gating_control power_gating);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4bf24758217f..8f09b8625c5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
1000 1000
1001 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); 1001 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
1002 1002
1003 if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) 1003 if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
1004 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ 1004 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
1005 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); 1005 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
1006 /* un-mute audio */ 1006 /* un-mute audio */
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1017 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 1017 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
1018 pipe_ctx->stream_res.stream_enc, true); 1018 pipe_ctx->stream_res.stream_enc, true);
1019 if (pipe_ctx->stream_res.audio) { 1019 if (pipe_ctx->stream_res.audio) {
1020 struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
1021
1020 if (option != KEEP_ACQUIRED_RESOURCE || 1022 if (option != KEEP_ACQUIRED_RESOURCE ||
1021 !dc->debug.az_endpoint_mute_only) { 1023 !dc->debug.az_endpoint_mute_only) {
1022 /*only disalbe az_endpoint if power down or free*/ 1024 /*only disalbe az_endpoint if power down or free*/
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1036 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); 1038 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
1037 pipe_ctx->stream_res.audio = NULL; 1039 pipe_ctx->stream_res.audio = NULL;
1038 } 1040 }
1041 if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
1042 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
1043 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
1039 1044
1040 /* TODO: notify audio driver for if audio modes list changed 1045 /* TODO: notify audio driver for if audio modes list changed
1041 * add audio mode list change flag */ 1046 * add audio mode list change flag */
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index a60a90e68d91..c4543178ba20 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc)
77 dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; 77 dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
78 dc->hwss.pipe_control_lock = dce_pipe_control_lock; 78 dc->hwss.pipe_control_lock = dce_pipe_control_lock;
79 dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; 79 dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
80 dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; 80 dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
81} 81}
82 82
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index cdd1d6b7b9f2..4e9ea50141bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -790,9 +790,22 @@ bool dce80_validate_bandwidth(
790 struct dc *dc, 790 struct dc *dc,
791 struct dc_state *context) 791 struct dc_state *context)
792{ 792{
793 /* TODO implement when needed but for now hardcode max value*/ 793 int i;
794 context->bw.dce.dispclk_khz = 681000; 794 bool at_least_one_pipe = false;
795 context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; 795
796 for (i = 0; i < dc->res_pool->pipe_count; i++) {
797 if (context->res_ctx.pipe_ctx[i].stream)
798 at_least_one_pipe = true;
799 }
800
801 if (at_least_one_pipe) {
802 /* TODO implement when needed but for now hardcode max value*/
803 context->bw.dce.dispclk_khz = 681000;
804 context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
805 } else {
806 context->bw.dce.dispclk_khz = 0;
807 context->bw.dce.yclk_khz = 0;
808 }
796 809
797 return true; 810 return true;
798} 811}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index dcb3c5530236..cd1ebe57ed59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
463 if (src_y_offset >= (int)param->viewport.height) 463 if (src_y_offset >= (int)param->viewport.height)
464 cur_en = 0; /* not visible beyond bottom edge*/ 464 cur_en = 0; /* not visible beyond bottom edge*/
465 465
466 if (src_y_offset < 0) 466 if (src_y_offset + (int)height <= 0)
467 cur_en = 0; /* not visible beyond top edge*/ 467 cur_en = 0; /* not visible beyond top edge*/
468 468
469 REG_UPDATE(CURSOR0_CONTROL, 469 REG_UPDATE(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 345af015d061..d1acd7165bc8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position(
1140 if (src_y_offset >= (int)param->viewport.height) 1140 if (src_y_offset >= (int)param->viewport.height)
1141 cur_en = 0; /* not visible beyond bottom edge*/ 1141 cur_en = 0; /* not visible beyond bottom edge*/
1142 1142
1143 if (src_y_offset < 0) //+ (int)hubp->curs_attr.height 1143 if (src_y_offset + (int)hubp->curs_attr.height <= 0)
1144 cur_en = 0; /* not visible beyond top edge*/ 1144 cur_en = 0; /* not visible beyond top edge*/
1145 1145
1146 if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) 1146 if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 91e015e14355..41883c981789 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface(
2355 top_pipe_to_program->plane_state->update_flags.bits.full_update) 2355 top_pipe_to_program->plane_state->update_flags.bits.full_update)
2356 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2356 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2357 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2357 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2358 2358 tg = pipe_ctx->stream_res.tg;
2359 /* Skip inactive pipes and ones already updated */ 2359 /* Skip inactive pipes and ones already updated */
2360 if (!pipe_ctx->stream || pipe_ctx->stream == stream 2360 if (!pipe_ctx->stream || pipe_ctx->stream == stream
2361 || !pipe_ctx->plane_state) 2361 || !pipe_ctx->plane_state
2362 || !tg->funcs->is_tg_enabled(tg))
2362 continue; 2363 continue;
2363 2364
2364 pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); 2365 tg->funcs->lock(tg);
2365 2366
2366 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( 2367 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2367 pipe_ctx->plane_res.hubp, 2368 pipe_ctx->plane_res.hubp,
2368 &pipe_ctx->dlg_regs, 2369 &pipe_ctx->dlg_regs,
2369 &pipe_ctx->ttu_regs); 2370 &pipe_ctx->ttu_regs);
2370 }
2371
2372 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2373 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2374 2371
2375 if (!pipe_ctx->stream || pipe_ctx->stream == stream 2372 tg->funcs->unlock(tg);
2376 || !pipe_ctx->plane_state) 2373 }
2377 continue;
2378
2379 dcn10_pipe_control_lock(dc, pipe_ctx, false);
2380 }
2381 2374
2382 if (num_planes == 0) 2375 if (num_planes == 0)
2383 false_optc_underflow_wa(dc, stream, tg); 2376 false_optc_underflow_wa(dc, stream, tg);
@@ -2665,8 +2658,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2665 .mirror = pipe_ctx->plane_state->horizontal_mirror 2658 .mirror = pipe_ctx->plane_state->horizontal_mirror
2666 }; 2659 };
2667 2660
2668 pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x; 2661 pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
2669 pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y; 2662 pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
2670 2663
2671 if (pipe_ctx->plane_state->address.type 2664 if (pipe_ctx->plane_state->address.type
2672 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) 2665 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 00f63b7dd32f..c11a443dcbc8 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
57#define NUM_POWER_FN_SEGS 8 57#define NUM_POWER_FN_SEGS 8
58#define NUM_BL_CURVE_SEGS 16 58#define NUM_BL_CURVE_SEGS 16
59 59
60#pragma pack(push, 1)
60/* NOTE: iRAM is 256B in size */ 61/* NOTE: iRAM is 256B in size */
61struct iram_table_v_2 { 62struct iram_table_v_2 {
62 /* flags */ 63 /* flags */
@@ -100,6 +101,7 @@ struct iram_table_v_2 {
100 uint8_t dummy8; /* 0xfe */ 101 uint8_t dummy8; /* 0xfe */
101 uint8_t dummy9; /* 0xff */ 102 uint8_t dummy9; /* 0xff */
102}; 103};
104#pragma pack(pop)
103 105
104static uint16_t backlight_8_to_16(unsigned int backlight_8bit) 106static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
105{ 107{
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 1479ea1dc3e7..789c4f288485 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -127,12 +127,13 @@ enum amd_pp_task {
127}; 127};
128 128
129enum PP_SMC_POWER_PROFILE { 129enum PP_SMC_POWER_PROFILE {
130 PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0, 130 PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
131 PP_SMC_POWER_PROFILE_POWERSAVING = 0x1, 131 PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
132 PP_SMC_POWER_PROFILE_VIDEO = 0x2, 132 PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
133 PP_SMC_POWER_PROFILE_VR = 0x3, 133 PP_SMC_POWER_PROFILE_VIDEO = 0x3,
134 PP_SMC_POWER_PROFILE_COMPUTE = 0x4, 134 PP_SMC_POWER_PROFILE_VR = 0x4,
135 PP_SMC_POWER_PROFILE_CUSTOM = 0x5, 135 PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
136 PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
136}; 137};
137 138
138enum { 139enum {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 0173d0480024..310b102a9292 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -64,17 +64,19 @@ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
64 64
65static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) 65static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
66{ 66{
67 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2; 67 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
68 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0; 68 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
69 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1; 69 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
70 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3; 70 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
71 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4; 71 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
72 72 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
73 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING; 73
74 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO; 74 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
75 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 75 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
76 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR; 76 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
77 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE; 77 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
78 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
79 hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
78} 80}
79 81
80int hwmgr_early_init(struct pp_hwmgr *hwmgr) 82int hwmgr_early_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index f95c5f50eb0f..5273de3c5b98 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1033 break; 1033 break;
1034 case amd_pp_dpp_clock: 1034 case amd_pp_dpp_clock:
1035 pclk_vol_table = pinfo->vdd_dep_on_dppclk; 1035 pclk_vol_table = pinfo->vdd_dep_on_dppclk;
1036 break;
1036 default: 1037 default:
1037 return -EINVAL; 1038 return -EINVAL;
1038 } 1039 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index d91390459326..c8f5c00dd1e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -77,8 +77,9 @@
77#define PCIE_BUS_CLK 10000 77#define PCIE_BUS_CLK 10000
78#define TCLK (PCIE_BUS_CLK / 10) 78#define TCLK (PCIE_BUS_CLK / 10)
79 79
80static const struct profile_mode_setting smu7_profiling[6] = 80static const struct profile_mode_setting smu7_profiling[7] =
81 {{1, 0, 100, 30, 1, 0, 100, 10}, 81 {{0, 0, 0, 0, 0, 0, 0, 0},
82 {1, 0, 100, 30, 1, 0, 100, 10},
82 {1, 10, 0, 30, 0, 0, 0, 0}, 83 {1, 10, 0, 30, 0, 0, 0, 0},
83 {0, 0, 0, 0, 1, 10, 16, 31}, 84 {0, 0, 0, 0, 1, 10, 16, 31},
84 {1, 0, 11, 50, 1, 0, 100, 10}, 85 {1, 0, 11, 50, 1, 0, 100, 10},
@@ -4889,7 +4890,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4889 uint32_t i, size = 0; 4890 uint32_t i, size = 0;
4890 uint32_t len; 4891 uint32_t len;
4891 4892
4892 static const char *profile_name[6] = {"3D_FULL_SCREEN", 4893 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4894 "3D_FULL_SCREEN",
4893 "POWER_SAVING", 4895 "POWER_SAVING",
4894 "VIDEO", 4896 "VIDEO",
4895 "VR", 4897 "VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 79c86247d0ac..91e3bbe6d61d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -804,9 +804,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
804 804
805 hwmgr->backend = data; 805 hwmgr->backend = data;
806 806
807 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 807 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
808 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 808 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
809 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 809 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
810 810
811 vega10_set_default_registry_data(hwmgr); 811 vega10_set_default_registry_data(hwmgr);
812 data->disable_dpm_mask = 0xff; 812 data->disable_dpm_mask = 0xff;
@@ -4668,13 +4668,15 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4668{ 4668{
4669 struct vega10_hwmgr *data = hwmgr->backend; 4669 struct vega10_hwmgr *data = hwmgr->backend;
4670 uint32_t i, size = 0; 4670 uint32_t i, size = 0;
4671 static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,}, 4671 static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
4672 {70, 60, 1, 3,},
4672 {90, 60, 0, 0,}, 4673 {90, 60, 0, 0,},
4673 {70, 60, 0, 0,}, 4674 {70, 60, 0, 0,},
4674 {70, 90, 0, 0,}, 4675 {70, 90, 0, 0,},
4675 {30, 60, 0, 6,}, 4676 {30, 60, 0, 6,},
4676 }; 4677 };
4677 static const char *profile_name[6] = {"3D_FULL_SCREEN", 4678 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4679 "3D_FULL_SCREEN",
4678 "POWER_SAVING", 4680 "POWER_SAVING",
4679 "VIDEO", 4681 "VIDEO",
4680 "VR", 4682 "VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index b8747a5c9204..99d596dc0e89 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -32,6 +32,7 @@
32#include "vega10_pptable.h" 32#include "vega10_pptable.h"
33 33
34#define NUM_DSPCLK_LEVELS 8 34#define NUM_DSPCLK_LEVELS 8
35#define VEGA10_ENGINECLOCK_HARDMAX 198000
35 36
36static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, 37static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
37 enum phm_platform_caps cap) 38 enum phm_platform_caps cap)
@@ -258,7 +259,26 @@ static int init_over_drive_limits(
258 struct pp_hwmgr *hwmgr, 259 struct pp_hwmgr *hwmgr,
259 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) 260 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
260{ 261{
261 hwmgr->platform_descriptor.overdriveLimit.engineClock = 262 const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
263 (const ATOM_Vega10_GFXCLK_Dependency_Table *)
264 (((unsigned long) powerplay_table) +
265 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
266 bool is_acg_enabled = false;
267 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
268
269 if (gfxclk_dep_table->ucRevId == 1) {
270 patom_record_v2 =
271 (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
272 is_acg_enabled =
273 (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
274 }
275
276 if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
277 !is_acg_enabled)
278 hwmgr->platform_descriptor.overdriveLimit.engineClock =
279 VEGA10_ENGINECLOCK_HARDMAX;
280 else
281 hwmgr->platform_descriptor.overdriveLimit.engineClock =
262 le32_to_cpu(powerplay_table->ulMaxODEngineClock); 282 le32_to_cpu(powerplay_table->ulMaxODEngineClock);
263 hwmgr->platform_descriptor.overdriveLimit.memoryClock = 283 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
264 le32_to_cpu(powerplay_table->ulMaxODMemoryClock); 284 le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 54364444ecd1..0c8212902275 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
753 return 0; 753 return 0;
754} 754}
755 755
756static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
757{
758 uint32_t result;
759
760 PP_ASSERT_WITH_CODE(
761 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
762 "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
763 return -EINVAL);
764
765 result = smum_get_argument(hwmgr);
766 PP_ASSERT_WITH_CODE(result == 1,
767 "Failed to run ACG BTC!", return -EINVAL);
768
769 return 0;
770}
771
756static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) 772static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
757{ 773{
758 struct vega12_hwmgr *data = 774 struct vega12_hwmgr *data =
@@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
931 "Failed to initialize SMC table!", 947 "Failed to initialize SMC table!",
932 result = tmp_result); 948 result = tmp_result);
933 949
950 tmp_result = vega12_run_acg_btc(hwmgr);
951 PP_ASSERT_WITH_CODE(!tmp_result,
952 "Failed to run ACG BTC!",
953 result = tmp_result);
954
934 result = vega12_enable_all_smu_features(hwmgr); 955 result = vega12_enable_all_smu_features(hwmgr);
935 PP_ASSERT_WITH_CODE(!result, 956 PP_ASSERT_WITH_CODE(!result,
936 "Failed to enable all smu features!", 957 "Failed to enable all smu features!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 26154f9b2178..82935a3bd950 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -390,9 +390,9 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
390 390
391 hwmgr->backend = data; 391 hwmgr->backend = data;
392 392
393 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 393 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
394 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 394 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
395 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 395 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
396 396
397 vega20_set_default_registry_data(hwmgr); 397 vega20_set_default_registry_data(hwmgr);
398 398
@@ -980,6 +980,9 @@ static int vega20_od8_set_feature_capabilities(
980 pp_table->FanZeroRpmEnable) 980 pp_table->FanZeroRpmEnable)
981 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; 981 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
982 982
983 if (!od_settings->overdrive8_capabilities)
984 hwmgr->od_enabled = false;
985
983 return 0; 986 return 0;
984} 987}
985 988
@@ -1689,13 +1692,6 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
1689 (PPCLK_UCLK << 16) | (min_freq & 0xffff))), 1692 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1690 "Failed to set soft min memclk !", 1693 "Failed to set soft min memclk !",
1691 return ret); 1694 return ret);
1692
1693 min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
1694 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1695 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1696 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1697 "Failed to set hard min memclk !",
1698 return ret);
1699 } 1695 }
1700 1696
1701 if (data->smu_features[GNLD_DPM_UVD].enabled && 1697 if (data->smu_features[GNLD_DPM_UVD].enabled &&
@@ -2248,6 +2244,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2248 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2244 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2249 soft_max_level = mask ? (fls(mask) - 1) : 0; 2245 soft_max_level = mask ? (fls(mask) - 1) : 0;
2250 2246
2247 if (soft_max_level >= data->dpm_table.gfx_table.count) {
2248 pr_err("Clock level specified %d is over max allowed %d\n",
2249 soft_max_level,
2250 data->dpm_table.gfx_table.count - 1);
2251 return -EINVAL;
2252 }
2253
2251 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2254 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2252 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2255 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2253 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2256 data->dpm_table.gfx_table.dpm_state.soft_max_level =
@@ -2268,6 +2271,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2268 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2271 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2269 soft_max_level = mask ? (fls(mask) - 1) : 0; 2272 soft_max_level = mask ? (fls(mask) - 1) : 0;
2270 2273
2274 if (soft_max_level >= data->dpm_table.mem_table.count) {
2275 pr_err("Clock level specified %d is over max allowed %d\n",
2276 soft_max_level,
2277 data->dpm_table.mem_table.count - 1);
2278 return -EINVAL;
2279 }
2280
2271 data->dpm_table.mem_table.dpm_state.soft_min_level = 2281 data->dpm_table.mem_table.dpm_state.soft_min_level =
2272 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2282 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2273 data->dpm_table.mem_table.dpm_state.soft_max_level = 2283 data->dpm_table.mem_table.dpm_state.soft_max_level =
@@ -3261,6 +3271,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
3261 int pplib_workload = 0; 3271 int pplib_workload = 0;
3262 3272
3263 switch (power_profile) { 3273 switch (power_profile) {
3274 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
3275 pplib_workload = WORKLOAD_DEFAULT_BIT;
3276 break;
3264 case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 3277 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
3265 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 3278 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
3266 break; 3279 break;
@@ -3290,6 +3303,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
3290 uint32_t i, size = 0; 3303 uint32_t i, size = 0;
3291 uint16_t workload_type = 0; 3304 uint16_t workload_type = 0;
3292 static const char *profile_name[] = { 3305 static const char *profile_name[] = {
3306 "BOOTUP_DEFAULT",
3293 "3D_FULL_SCREEN", 3307 "3D_FULL_SCREEN",
3294 "POWER_SAVING", 3308 "POWER_SAVING",
3295 "VIDEO", 3309 "VIDEO",
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 0d298a0409f5..8cb831b6a016 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -705,7 +705,7 @@ enum PP_TABLE_VERSION {
705/** 705/**
706 * The main hardware manager structure. 706 * The main hardware manager structure.
707 */ 707 */
708#define Workload_Policy_Max 5 708#define Workload_Policy_Max 6
709 709
710struct pp_hwmgr { 710struct pp_hwmgr {
711 void *adev; 711 void *adev;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8e28e738cb52..e6403b9549f1 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -98,6 +98,8 @@
98#define DP0_STARTVAL 0x064c 98#define DP0_STARTVAL 0x064c
99#define DP0_ACTIVEVAL 0x0650 99#define DP0_ACTIVEVAL 0x0650
100#define DP0_SYNCVAL 0x0654 100#define DP0_SYNCVAL 0x0654
101#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
102#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
101#define DP0_MISC 0x0658 103#define DP0_MISC 0x0658
102#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ 104#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
103#define BPC_6 (0 << 5) 105#define BPC_6 (0 << 5)
@@ -142,6 +144,8 @@
142#define DP0_LTLOOPCTRL 0x06d8 144#define DP0_LTLOOPCTRL 0x06d8
143#define DP0_SNKLTCTRL 0x06e4 145#define DP0_SNKLTCTRL 0x06e4
144 146
147#define DP1_SRCCTRL 0x07a0
148
145/* PHY */ 149/* PHY */
146#define DP_PHY_CTRL 0x0800 150#define DP_PHY_CTRL 0x0800
147#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ 151#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
@@ -150,6 +154,7 @@
150#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ 154#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
151#define PHY_RDY BIT(16) /* PHY Main Channels Ready */ 155#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
152#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ 156#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
157#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
153#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ 158#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
154#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ 159#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
155 160
@@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
540 unsigned long rate; 545 unsigned long rate;
541 u32 value; 546 u32 value;
542 int ret; 547 int ret;
548 u32 dp_phy_ctrl;
543 549
544 rate = clk_get_rate(tc->refclk); 550 rate = clk_get_rate(tc->refclk);
545 switch (rate) { 551 switch (rate) {
@@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
564 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; 570 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
565 tc_write(SYS_PLLPARAM, value); 571 tc_write(SYS_PLLPARAM, value);
566 572
567 tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); 573 dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
574 if (tc->link.base.num_lanes == 2)
575 dp_phy_ctrl |= PHY_2LANE;
576 tc_write(DP_PHY_CTRL, dp_phy_ctrl);
568 577
569 /* 578 /*
570 * Initially PLLs are in bypass. Force PLL parameter update, 579 * Initially PLLs are in bypass. Force PLL parameter update,
@@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
719 728
720 tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); 729 tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
721 730
722 tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); 731 tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
732 ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
733 ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
723 734
724 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | 735 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
725 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); 736 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
@@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
829 if (!tc->mode) 840 if (!tc->mode)
830 return -EINVAL; 841 return -EINVAL;
831 842
832 /* from excel file - DP0_SrcCtrl */ 843 tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
833 tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B | 844 /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
834 DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 | 845 tc_write(DP1_SRCCTRL,
835 DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT); 846 (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
836 /* from excel file - DP1_SrcCtrl */ 847 ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
837 tc_write(0x07a0, 0x00003083);
838 848
839 rate = clk_get_rate(tc->refclk); 849 rate = clk_get_rate(tc->refclk);
840 switch (rate) { 850 switch (rate) {
@@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
855 } 865 }
856 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; 866 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
857 tc_write(SYS_PLLPARAM, value); 867 tc_write(SYS_PLLPARAM, value);
868
858 /* Setup Main Link */ 869 /* Setup Main Link */
859 dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN; 870 dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
871 if (tc->link.base.num_lanes == 2)
872 dp_phy_ctrl |= PHY_2LANE;
860 tc_write(DP_PHY_CTRL, dp_phy_ctrl); 873 tc_write(DP_PHY_CTRL, dp_phy_ctrl);
861 msleep(100); 874 msleep(100);
862 875
@@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
1105static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector, 1118static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
1106 struct drm_display_mode *mode) 1119 struct drm_display_mode *mode)
1107{ 1120{
1121 struct tc_data *tc = connector_to_tc(connector);
1122 u32 req, avail;
1123 u32 bits_per_pixel = 24;
1124
1108 /* DPI interface clock limitation: upto 154 MHz */ 1125 /* DPI interface clock limitation: upto 154 MHz */
1109 if (mode->clock > 154000) 1126 if (mode->clock > 154000)
1110 return MODE_CLOCK_HIGH; 1127 return MODE_CLOCK_HIGH;
1111 1128
1129 req = mode->clock * bits_per_pixel / 8;
1130 avail = tc->link.base.num_lanes * tc->link.base.rate;
1131
1132 if (req > avail)
1133 return MODE_BAD;
1134
1112 return MODE_OK; 1135 return MODE_OK;
1113} 1136}
1114 1137
@@ -1186,7 +1209,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
1186 /* Create eDP connector */ 1209 /* Create eDP connector */
1187 drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs); 1210 drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
1188 ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, 1211 ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
1189 DRM_MODE_CONNECTOR_eDP); 1212 tc->panel ? DRM_MODE_CONNECTOR_eDP :
1213 DRM_MODE_CONNECTOR_DisplayPort);
1190 if (ret) 1214 if (ret)
1191 return ret; 1215 return ret;
1192 1216
@@ -1195,6 +1219,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
1195 1219
1196 drm_display_info_set_bus_formats(&tc->connector.display_info, 1220 drm_display_info_set_bus_formats(&tc->connector.display_info,
1197 &bus_format, 1); 1221 &bus_format, 1);
1222 tc->connector.display_info.bus_flags =
1223 DRM_BUS_FLAG_DE_HIGH |
1224 DRM_BUS_FLAG_PIXDATA_NEGEDGE |
1225 DRM_BUS_FLAG_SYNC_NEGEDGE;
1198 drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); 1226 drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
1199 1227
1200 return 0; 1228 return 0;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c40889888a16..9a1f41adfc67 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1296,12 +1296,11 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
1296 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 1296 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1297 return -EINVAL; 1297 return -EINVAL;
1298 1298
1299 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1300
1301 state = drm_atomic_state_alloc(dev); 1299 state = drm_atomic_state_alloc(dev);
1302 if (!state) 1300 if (!state)
1303 return -ENOMEM; 1301 return -ENOMEM;
1304 1302
1303 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1305 state->acquire_ctx = &ctx; 1304 state->acquire_ctx = &ctx;
1306 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 1305 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
1307 1306
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 2d6c491a0542..516e82d0ed50 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1273,6 +1273,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
1273 { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, 1273 { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
1274 /* LG LP140WF6-SPM1 eDP panel */ 1274 /* LG LP140WF6-SPM1 eDP panel */
1275 { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, 1275 { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
1276 /* Apple panels need some additional handling to support PSR */
1277 { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
1276}; 1278};
1277 1279
1278#undef OUI 1280#undef OUI
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d3af098b0922..d73703a695e8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1621,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
1621 var_1->transp.msb_right == var_2->transp.msb_right; 1621 var_1->transp.msb_right == var_2->transp.msb_right;
1622} 1622}
1623 1623
1624static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
1625 u8 depth)
1626{
1627 switch (depth) {
1628 case 8:
1629 var->red.offset = 0;
1630 var->green.offset = 0;
1631 var->blue.offset = 0;
1632 var->red.length = 8; /* 8bit DAC */
1633 var->green.length = 8;
1634 var->blue.length = 8;
1635 var->transp.offset = 0;
1636 var->transp.length = 0;
1637 break;
1638 case 15:
1639 var->red.offset = 10;
1640 var->green.offset = 5;
1641 var->blue.offset = 0;
1642 var->red.length = 5;
1643 var->green.length = 5;
1644 var->blue.length = 5;
1645 var->transp.offset = 15;
1646 var->transp.length = 1;
1647 break;
1648 case 16:
1649 var->red.offset = 11;
1650 var->green.offset = 5;
1651 var->blue.offset = 0;
1652 var->red.length = 5;
1653 var->green.length = 6;
1654 var->blue.length = 5;
1655 var->transp.offset = 0;
1656 break;
1657 case 24:
1658 var->red.offset = 16;
1659 var->green.offset = 8;
1660 var->blue.offset = 0;
1661 var->red.length = 8;
1662 var->green.length = 8;
1663 var->blue.length = 8;
1664 var->transp.offset = 0;
1665 var->transp.length = 0;
1666 break;
1667 case 32:
1668 var->red.offset = 16;
1669 var->green.offset = 8;
1670 var->blue.offset = 0;
1671 var->red.length = 8;
1672 var->green.length = 8;
1673 var->blue.length = 8;
1674 var->transp.offset = 24;
1675 var->transp.length = 8;
1676 break;
1677 default:
1678 break;
1679 }
1680}
1681
1624/** 1682/**
1625 * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var 1683 * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
1626 * @var: screeninfo to check 1684 * @var: screeninfo to check
@@ -1632,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1632 struct drm_fb_helper *fb_helper = info->par; 1690 struct drm_fb_helper *fb_helper = info->par;
1633 struct drm_framebuffer *fb = fb_helper->fb; 1691 struct drm_framebuffer *fb = fb_helper->fb;
1634 1692
1635 if (var->pixclock != 0 || in_dbg_master()) 1693 if (in_dbg_master())
1636 return -EINVAL; 1694 return -EINVAL;
1637 1695
1696 if (var->pixclock != 0) {
1697 DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
1698 var->pixclock = 0;
1699 }
1700
1638 if ((drm_format_info_block_width(fb->format, 0) > 1) || 1701 if ((drm_format_info_block_width(fb->format, 0) > 1) ||
1639 (drm_format_info_block_height(fb->format, 0) > 1)) 1702 (drm_format_info_block_height(fb->format, 0) > 1))
1640 return -EINVAL; 1703 return -EINVAL;
@@ -1655,6 +1718,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1655 } 1718 }
1656 1719
1657 /* 1720 /*
1721 * Workaround for SDL 1.2, which is known to be setting all pixel format
1722 * fields values to zero in some cases. We treat this situation as a
1723 * kind of "use some reasonable autodetected values".
1724 */
1725 if (!var->red.offset && !var->green.offset &&
1726 !var->blue.offset && !var->transp.offset &&
1727 !var->red.length && !var->green.length &&
1728 !var->blue.length && !var->transp.length &&
1729 !var->red.msb_right && !var->green.msb_right &&
1730 !var->blue.msb_right && !var->transp.msb_right) {
1731 drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
1732 }
1733
1734 /*
1658 * drm fbdev emulation doesn't support changing the pixel format at all, 1735 * drm fbdev emulation doesn't support changing the pixel format at all,
1659 * so reject all pixel format changing requests. 1736 * so reject all pixel format changing requests.
1660 */ 1737 */
@@ -1967,59 +2044,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
1967 info->var.yoffset = 0; 2044 info->var.yoffset = 0;
1968 info->var.activate = FB_ACTIVATE_NOW; 2045 info->var.activate = FB_ACTIVATE_NOW;
1969 2046
1970 switch (fb->format->depth) { 2047 drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
1971 case 8:
1972 info->var.red.offset = 0;
1973 info->var.green.offset = 0;
1974 info->var.blue.offset = 0;
1975 info->var.red.length = 8; /* 8bit DAC */
1976 info->var.green.length = 8;
1977 info->var.blue.length = 8;
1978 info->var.transp.offset = 0;
1979 info->var.transp.length = 0;
1980 break;
1981 case 15:
1982 info->var.red.offset = 10;
1983 info->var.green.offset = 5;
1984 info->var.blue.offset = 0;
1985 info->var.red.length = 5;
1986 info->var.green.length = 5;
1987 info->var.blue.length = 5;
1988 info->var.transp.offset = 15;
1989 info->var.transp.length = 1;
1990 break;
1991 case 16:
1992 info->var.red.offset = 11;
1993 info->var.green.offset = 5;
1994 info->var.blue.offset = 0;
1995 info->var.red.length = 5;
1996 info->var.green.length = 6;
1997 info->var.blue.length = 5;
1998 info->var.transp.offset = 0;
1999 break;
2000 case 24:
2001 info->var.red.offset = 16;
2002 info->var.green.offset = 8;
2003 info->var.blue.offset = 0;
2004 info->var.red.length = 8;
2005 info->var.green.length = 8;
2006 info->var.blue.length = 8;
2007 info->var.transp.offset = 0;
2008 info->var.transp.length = 0;
2009 break;
2010 case 32:
2011 info->var.red.offset = 16;
2012 info->var.green.offset = 8;
2013 info->var.blue.offset = 0;
2014 info->var.red.length = 8;
2015 info->var.green.length = 8;
2016 info->var.blue.length = 8;
2017 info->var.transp.offset = 24;
2018 info->var.transp.length = 8;
2019 break;
2020 default:
2021 break;
2022 }
2023 2048
2024 info->var.xres = fb_width; 2049 info->var.xres = fb_width;
2025 info->var.yres = fb_height; 2050 info->var.yres = fb_height;
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 99cba8ea5d82..5df1256618cc 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
528 528
529 object_count = cl->object_count; 529 object_count = cl->object_count;
530 530
531 object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32)); 531 object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
532 array_size(object_count, sizeof(__u32)));
532 if (IS_ERR(object_ids)) 533 if (IS_ERR(object_ids))
533 return PTR_ERR(object_ids); 534 return PTR_ERR(object_ids);
534 535
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index cd9bc0ce9be0..004191d01772 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -459,11 +459,11 @@ static int set_property_atomic(struct drm_mode_object *obj,
459 struct drm_modeset_acquire_ctx ctx; 459 struct drm_modeset_acquire_ctx ctx;
460 int ret; 460 int ret;
461 461
462 drm_modeset_acquire_init(&ctx, 0);
463
464 state = drm_atomic_state_alloc(dev); 462 state = drm_atomic_state_alloc(dev);
465 if (!state) 463 if (!state)
466 return -ENOMEM; 464 return -ENOMEM;
465
466 drm_modeset_acquire_init(&ctx, 0);
467 state->acquire_ctx = &ctx; 467 state->acquire_ctx = &ctx;
468retry: 468retry:
469 if (prop == state->dev->mode_config.dpms_property) { 469 if (prop == state->dev->mode_config.dpms_property) {
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 24a750436559..f91e02c87fd8 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
758 if (mode->hsync) 758 if (mode->hsync)
759 return mode->hsync; 759 return mode->hsync;
760 760
761 if (mode->htotal < 0) 761 if (mode->htotal <= 0)
762 return 0; 762 return 0;
763 763
764 calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ 764 calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a9d9df6c85ad..693748ad8b88 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
61 return NULL; 61 return NULL;
62 62
63 dmah->size = size; 63 dmah->size = size;
64 dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, 64 dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
65 GFP_KERNEL | __GFP_COMP); 65 &dmah->busaddr,
66 GFP_KERNEL | __GFP_COMP);
66 67
67 if (dmah->vaddr == NULL) { 68 if (dmah->vaddr == NULL) {
68 kfree(dmah); 69 kfree(dmah);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index b5475c91e2ef..e9f343b124b0 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2799 MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2799 MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2800 MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2800 MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2801 MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2801 MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2802 MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2802 return 0; 2803 return 0;
2803} 2804}
2804 2805
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 5af11cf1b482..e1675a00df12 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -41,7 +41,7 @@ struct intel_gvt_mpt {
41 int (*host_init)(struct device *dev, void *gvt, const void *ops); 41 int (*host_init)(struct device *dev, void *gvt, const void *ops);
42 void (*host_exit)(struct device *dev, void *gvt); 42 void (*host_exit)(struct device *dev, void *gvt);
43 int (*attach_vgpu)(void *vgpu, unsigned long *handle); 43 int (*attach_vgpu)(void *vgpu, unsigned long *handle);
44 void (*detach_vgpu)(unsigned long handle); 44 void (*detach_vgpu)(void *vgpu);
45 int (*inject_msi)(unsigned long handle, u32 addr, u16 data); 45 int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
46 unsigned long (*from_virt_to_mfn)(void *p); 46 unsigned long (*from_virt_to_mfn)(void *p);
47 int (*enable_page_track)(unsigned long handle, u64 gfn); 47 int (*enable_page_track)(unsigned long handle, u64 gfn);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c1072143da1d..dd3dfd00f4e6 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
996{ 996{
997 unsigned int index; 997 unsigned int index;
998 u64 virtaddr; 998 u64 virtaddr;
999 unsigned long req_size, pgoff = 0; 999 unsigned long req_size, pgoff, req_start;
1000 pgprot_t pg_prot; 1000 pgprot_t pg_prot;
1001 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); 1001 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1002 1002
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1014 pg_prot = vma->vm_page_prot; 1014 pg_prot = vma->vm_page_prot;
1015 virtaddr = vma->vm_start; 1015 virtaddr = vma->vm_start;
1016 req_size = vma->vm_end - vma->vm_start; 1016 req_size = vma->vm_end - vma->vm_start;
1017 pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; 1017 pgoff = vma->vm_pgoff &
1018 ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1019 req_start = pgoff << PAGE_SHIFT;
1020
1021 if (!intel_vgpu_in_aperture(vgpu, req_start))
1022 return -EINVAL;
1023 if (req_start + req_size >
1024 vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1025 return -EINVAL;
1026
1027 pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1018 1028
1019 return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); 1029 return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
1020} 1030}
@@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
1662 return 0; 1672 return 0;
1663} 1673}
1664 1674
1665static void kvmgt_detach_vgpu(unsigned long handle) 1675static void kvmgt_detach_vgpu(void *p_vgpu)
1666{ 1676{
1667 /* nothing to do here */ 1677 int i;
1678 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1679
1680 if (!vgpu->vdev.region)
1681 return;
1682
1683 for (i = 0; i < vgpu->vdev.num_regions; i++)
1684 if (vgpu->vdev.region[i].ops->release)
1685 vgpu->vdev.region[i].ops->release(vgpu,
1686 &vgpu->vdev.region[i]);
1687 vgpu->vdev.num_regions = 0;
1688 kfree(vgpu->vdev.region);
1689 vgpu->vdev.region = NULL;
1668} 1690}
1669 1691
1670static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) 1692static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 67f19992b226..3ed34123d8d1 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
101 if (!intel_gvt_host.mpt->detach_vgpu) 101 if (!intel_gvt_host.mpt->detach_vgpu)
102 return; 102 return;
103 103
104 intel_gvt_host.mpt->detach_vgpu(vgpu->handle); 104 intel_gvt_host.mpt->detach_vgpu(vgpu);
105} 105}
106 106
107#define MSI_CAP_CONTROL(offset) (offset + 2) 107#define MSI_CAP_CONTROL(offset) (offset + 2)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1ad8c5e1455d..55bb7885e228 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
332 332
333 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); 333 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
334 i915_gem_object_put(wa_ctx->indirect_ctx.obj); 334 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
335
336 wa_ctx->indirect_ctx.obj = NULL;
337 wa_ctx->indirect_ctx.shadow_va = NULL;
335} 338}
336 339
337static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, 340static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -356,6 +359,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
356 return 0; 359 return 0;
357} 360}
358 361
362static int
363intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
364{
365 struct intel_vgpu *vgpu = workload->vgpu;
366 struct intel_vgpu_submission *s = &vgpu->submission;
367 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
368 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
369 struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
370 struct i915_request *rq;
371 int ret = 0;
372
373 lockdep_assert_held(&dev_priv->drm.struct_mutex);
374
375 if (workload->req)
376 goto out;
377
378 rq = i915_request_alloc(engine, shadow_ctx);
379 if (IS_ERR(rq)) {
380 gvt_vgpu_err("fail to allocate gem request\n");
381 ret = PTR_ERR(rq);
382 goto out;
383 }
384 workload->req = i915_request_get(rq);
385out:
386 return ret;
387}
388
359/** 389/**
360 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and 390 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
361 * shadow it as well, include ringbuffer,wa_ctx and ctx. 391 * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -372,12 +402,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
372 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 402 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
373 struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; 403 struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
374 struct intel_context *ce; 404 struct intel_context *ce;
375 struct i915_request *rq;
376 int ret; 405 int ret;
377 406
378 lockdep_assert_held(&dev_priv->drm.struct_mutex); 407 lockdep_assert_held(&dev_priv->drm.struct_mutex);
379 408
380 if (workload->req) 409 if (workload->shadow)
381 return 0; 410 return 0;
382 411
383 ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); 412 ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
@@ -417,22 +446,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
417 goto err_shadow; 446 goto err_shadow;
418 } 447 }
419 448
420 rq = i915_request_alloc(engine, shadow_ctx); 449 workload->shadow = true;
421 if (IS_ERR(rq)) {
422 gvt_vgpu_err("fail to allocate gem request\n");
423 ret = PTR_ERR(rq);
424 goto err_shadow;
425 }
426 workload->req = i915_request_get(rq);
427
428 ret = populate_shadow_context(workload);
429 if (ret)
430 goto err_req;
431
432 return 0; 450 return 0;
433err_req:
434 rq = fetch_and_zero(&workload->req);
435 i915_request_put(rq);
436err_shadow: 451err_shadow:
437 release_shadow_wa_ctx(&workload->wa_ctx); 452 release_shadow_wa_ctx(&workload->wa_ctx);
438err_unpin: 453err_unpin:
@@ -671,23 +686,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
671 mutex_lock(&vgpu->vgpu_lock); 686 mutex_lock(&vgpu->vgpu_lock);
672 mutex_lock(&dev_priv->drm.struct_mutex); 687 mutex_lock(&dev_priv->drm.struct_mutex);
673 688
689 ret = intel_gvt_workload_req_alloc(workload);
690 if (ret)
691 goto err_req;
692
674 ret = intel_gvt_scan_and_shadow_workload(workload); 693 ret = intel_gvt_scan_and_shadow_workload(workload);
675 if (ret) 694 if (ret)
676 goto out; 695 goto out;
677 696
678 ret = prepare_workload(workload); 697 ret = populate_shadow_context(workload);
698 if (ret) {
699 release_shadow_wa_ctx(&workload->wa_ctx);
700 goto out;
701 }
679 702
703 ret = prepare_workload(workload);
680out: 704out:
681 if (ret)
682 workload->status = ret;
683
684 if (!IS_ERR_OR_NULL(workload->req)) { 705 if (!IS_ERR_OR_NULL(workload->req)) {
685 gvt_dbg_sched("ring id %d submit workload to i915 %p\n", 706 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
686 ring_id, workload->req); 707 ring_id, workload->req);
687 i915_request_add(workload->req); 708 i915_request_add(workload->req);
688 workload->dispatched = true; 709 workload->dispatched = true;
689 } 710 }
690 711err_req:
712 if (ret)
713 workload->status = ret;
691 mutex_unlock(&dev_priv->drm.struct_mutex); 714 mutex_unlock(&dev_priv->drm.struct_mutex);
692 mutex_unlock(&vgpu->vgpu_lock); 715 mutex_unlock(&vgpu->vgpu_lock);
693 return ret; 716 return ret;
@@ -891,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
891 914
892 list_del_init(&workload->list); 915 list_del_init(&workload->list);
893 916
894 if (!workload->status) {
895 release_shadow_batch_buffer(workload);
896 release_shadow_wa_ctx(&workload->wa_ctx);
897 }
898
899 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { 917 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
900 /* if workload->status is not successful means HW GPU 918 /* if workload->status is not successful means HW GPU
901 * has occurred GPU hang or something wrong with i915/GVT, 919 * has occurred GPU hang or something wrong with i915/GVT,
@@ -1263,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1263{ 1281{
1264 struct intel_vgpu_submission *s = &workload->vgpu->submission; 1282 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1265 1283
1284 release_shadow_batch_buffer(workload);
1285 release_shadow_wa_ctx(&workload->wa_ctx);
1286
1266 if (workload->shadow_mm) 1287 if (workload->shadow_mm)
1267 intel_vgpu_mm_put(workload->shadow_mm); 1288 intel_vgpu_mm_put(workload->shadow_mm);
1268 1289
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ca5529d0e48e..2065cba59aab 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,6 +83,7 @@ struct intel_vgpu_workload {
83 struct i915_request *req; 83 struct i915_request *req;
84 /* if this workload has been dispatched to i915? */ 84 /* if this workload has been dispatched to i915? */
85 bool dispatched; 85 bool dispatched;
86 bool shadow; /* if workload has done shadow of guest request */
86 int status; 87 int status;
87 88
88 struct intel_vgpu_mm *shadow_mm; 89 struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 38dcee1ca062..40a61ef9aac1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -984,8 +984,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
984 intel_runtime_pm_get(i915); 984 intel_runtime_pm_get(i915);
985 gpu = i915_capture_gpu_state(i915); 985 gpu = i915_capture_gpu_state(i915);
986 intel_runtime_pm_put(i915); 986 intel_runtime_pm_put(i915);
987 if (!gpu) 987 if (IS_ERR(gpu))
988 return -ENOMEM; 988 return PTR_ERR(gpu);
989 989
990 file->private_data = gpu; 990 file->private_data = gpu;
991 return 0; 991 return 0;
@@ -1018,7 +1018,13 @@ i915_error_state_write(struct file *filp,
1018 1018
1019static int i915_error_state_open(struct inode *inode, struct file *file) 1019static int i915_error_state_open(struct inode *inode, struct file *file)
1020{ 1020{
1021 file->private_data = i915_first_error_state(inode->i_private); 1021 struct i915_gpu_state *error;
1022
1023 error = i915_first_error_state(inode->i_private);
1024 if (IS_ERR(error))
1025 return PTR_ERR(error);
1026
1027 file->private_data = error;
1022 return 0; 1028 return 0;
1023} 1029}
1024 1030
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 216f52b744a6..c882ea94172c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1824 return 0; 1824 return 0;
1825} 1825}
1826 1826
1827static inline bool
1828__vma_matches(struct vm_area_struct *vma, struct file *filp,
1829 unsigned long addr, unsigned long size)
1830{
1831 if (vma->vm_file != filp)
1832 return false;
1833
1834 return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
1835}
1836
1827/** 1837/**
1828 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address 1838 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1829 * it is mapped to. 1839 * it is mapped to.
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1882 return -EINTR; 1892 return -EINTR;
1883 } 1893 }
1884 vma = find_vma(mm, addr); 1894 vma = find_vma(mm, addr);
1885 if (vma) 1895 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
1886 vma->vm_page_prot = 1896 vma->vm_page_prot =
1887 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 1897 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1888 else 1898 else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index add1fe7aeb93..bd17dd1f5da5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2075,6 +2075,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
2075int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) 2075int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2076{ 2076{
2077 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 2077 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2078 int err;
2078 2079
2079 /* 2080 /*
2080 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt 2081 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +2091,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2090 * allocator works in address space sizes, so it's multiplied by page 2091 * allocator works in address space sizes, so it's multiplied by page
2091 * size. We allocate at the top of the GTT to avoid fragmentation. 2092 * size. We allocate at the top of the GTT to avoid fragmentation.
2092 */ 2093 */
2093 return i915_vma_pin(ppgtt->vma, 2094 err = i915_vma_pin(ppgtt->vma,
2094 0, GEN6_PD_ALIGN, 2095 0, GEN6_PD_ALIGN,
2095 PIN_GLOBAL | PIN_HIGH); 2096 PIN_GLOBAL | PIN_HIGH);
2097 if (err)
2098 goto unpin;
2099
2100 return 0;
2101
2102unpin:
2103 ppgtt->pin_count = 0;
2104 return err;
2096} 2105}
2097 2106
2098void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) 2107void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 07465123c166..3f9ce403c755 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1907,9 +1907,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
1907{ 1907{
1908 struct i915_gpu_state *error; 1908 struct i915_gpu_state *error;
1909 1909
1910 /* Check if GPU capture has been disabled */
1911 error = READ_ONCE(i915->gpu_error.first_error);
1912 if (IS_ERR(error))
1913 return error;
1914
1910 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1915 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1911 if (!error) 1916 if (!error) {
1912 return NULL; 1917 i915_disable_error_state(i915, -ENOMEM);
1918 return ERR_PTR(-ENOMEM);
1919 }
1913 1920
1914 kref_init(&error->ref); 1921 kref_init(&error->ref);
1915 error->i915 = i915; 1922 error->i915 = i915;
@@ -1945,11 +1952,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
1945 return; 1952 return;
1946 1953
1947 error = i915_capture_gpu_state(i915); 1954 error = i915_capture_gpu_state(i915);
1948 if (!error) { 1955 if (IS_ERR(error))
1949 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1950 i915_disable_error_state(i915, -ENOMEM);
1951 return; 1956 return;
1952 }
1953 1957
1954 i915_error_capture_msg(i915, error, engine_mask, error_msg); 1958 i915_error_capture_msg(i915, error, engine_mask, error_msg);
1955 DRM_INFO("%s\n", error->error_msg); 1959 DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1991,7 @@ i915_first_error_state(struct drm_i915_private *i915)
1987 1991
1988 spin_lock_irq(&i915->gpu_error.lock); 1992 spin_lock_irq(&i915->gpu_error.lock);
1989 error = i915->gpu_error.first_error; 1993 error = i915->gpu_error.first_error;
1990 if (error) 1994 if (!IS_ERR_OR_NULL(error))
1991 i915_gpu_state_get(error); 1995 i915_gpu_state_get(error);
1992 spin_unlock_irq(&i915->gpu_error.lock); 1996 spin_unlock_irq(&i915->gpu_error.lock);
1993 1997
@@ -2000,10 +2004,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
2000 2004
2001 spin_lock_irq(&i915->gpu_error.lock); 2005 spin_lock_irq(&i915->gpu_error.lock);
2002 error = i915->gpu_error.first_error; 2006 error = i915->gpu_error.first_error;
2003 i915->gpu_error.first_error = NULL; 2007 if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
2008 i915->gpu_error.first_error = NULL;
2004 spin_unlock_irq(&i915->gpu_error.lock); 2009 spin_unlock_irq(&i915->gpu_error.lock);
2005 2010
2006 if (!IS_ERR(error)) 2011 if (!IS_ERR_OR_NULL(error))
2007 i915_gpu_state_put(error); 2012 i915_gpu_state_put(error);
2008} 2013}
2009 2014
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d6c8f8fdfda5..017fc602a10e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event)
594 * Update the bitmask of enabled events and increment 594 * Update the bitmask of enabled events and increment
595 * the event reference counter. 595 * the event reference counter.
596 */ 596 */
597 GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); 597 BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
598 GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
598 GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); 599 GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
599 i915->pmu.enable |= BIT_ULL(bit); 600 i915->pmu.enable |= BIT_ULL(bit);
600 i915->pmu.enable_count[bit]++; 601 i915->pmu.enable_count[bit]++;
@@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event)
615 engine = intel_engine_lookup_user(i915, 616 engine = intel_engine_lookup_user(i915,
616 engine_event_class(event), 617 engine_event_class(event),
617 engine_event_instance(event)); 618 engine_event_instance(event));
618 GEM_BUG_ON(!engine);
619 engine->pmu.enable |= BIT(sample);
620 619
621 GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); 620 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
621 I915_ENGINE_SAMPLE_COUNT);
622 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
623 I915_ENGINE_SAMPLE_COUNT);
624 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
625 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
622 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); 626 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
627
628 engine->pmu.enable |= BIT(sample);
623 engine->pmu.enable_count[sample]++; 629 engine->pmu.enable_count[sample]++;
624 } 630 }
625 631
@@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event)
649 engine = intel_engine_lookup_user(i915, 655 engine = intel_engine_lookup_user(i915,
650 engine_event_class(event), 656 engine_event_class(event),
651 engine_event_instance(event)); 657 engine_event_instance(event));
652 GEM_BUG_ON(!engine); 658
653 GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); 659 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
660 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
654 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); 661 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
662
655 /* 663 /*
656 * Decrement the reference count and clear the enabled 664 * Decrement the reference count and clear the enabled
657 * bitmask when the last listener on an event goes away. 665 * bitmask when the last listener on an event goes away.
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event)
660 engine->pmu.enable &= ~BIT(sample); 668 engine->pmu.enable &= ~BIT(sample);
661 } 669 }
662 670
663 GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); 671 GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
664 GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); 672 GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
665 /* 673 /*
666 * Decrement the reference count and clear the enabled 674 * Decrement the reference count and clear the enabled
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 7f164ca3db12..b3728c5f13e7 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -31,6 +31,8 @@ enum {
31 ((1 << I915_PMU_SAMPLE_BITS) + \ 31 ((1 << I915_PMU_SAMPLE_BITS) + \
32 (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) 32 (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
33 33
34#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
35
34struct i915_pmu_sample { 36struct i915_pmu_sample {
35 u64 cur; 37 u64 cur;
36}; 38};
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0a7d60509ca7..067054cf4a86 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1790,7 +1790,7 @@ enum i915_power_well_id {
1790#define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 1790#define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40
1791#define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 1791#define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40
1792#define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 1792#define _CNL_PORT_TX_F_LN0_OFFSET 0x162840
1793#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ 1793#define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \
1794 _CNL_PORT_TX_AE_GRP_OFFSET, \ 1794 _CNL_PORT_TX_AE_GRP_OFFSET, \
1795 _CNL_PORT_TX_B_GRP_OFFSET, \ 1795 _CNL_PORT_TX_B_GRP_OFFSET, \
1796 _CNL_PORT_TX_B_GRP_OFFSET, \ 1796 _CNL_PORT_TX_B_GRP_OFFSET, \
@@ -1798,7 +1798,7 @@ enum i915_power_well_id {
1798 _CNL_PORT_TX_AE_GRP_OFFSET, \ 1798 _CNL_PORT_TX_AE_GRP_OFFSET, \
1799 _CNL_PORT_TX_F_GRP_OFFSET) + \ 1799 _CNL_PORT_TX_F_GRP_OFFSET) + \
1800 4 * (dw)) 1800 4 * (dw))
1801#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ 1801#define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \
1802 _CNL_PORT_TX_AE_LN0_OFFSET, \ 1802 _CNL_PORT_TX_AE_LN0_OFFSET, \
1803 _CNL_PORT_TX_B_LN0_OFFSET, \ 1803 _CNL_PORT_TX_B_LN0_OFFSET, \
1804 _CNL_PORT_TX_B_LN0_OFFSET, \ 1804 _CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1834,9 +1834,9 @@ enum i915_power_well_id {
1834 1834
1835#define _CNL_PORT_TX_DW4_LN0_AE 0x162450 1835#define _CNL_PORT_TX_DW4_LN0_AE 0x162450
1836#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 1836#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
1837#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) 1837#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
1838#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) 1838#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
1839#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ 1839#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
1840 ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ 1840 ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
1841 _CNL_PORT_TX_DW4_LN0_AE))) 1841 _CNL_PORT_TX_DW4_LN0_AE)))
1842#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) 1842#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
@@ -1864,8 +1864,12 @@ enum i915_power_well_id {
1864#define RTERM_SELECT(x) ((x) << 3) 1864#define RTERM_SELECT(x) ((x) << 3)
1865#define RTERM_SELECT_MASK (0x7 << 3) 1865#define RTERM_SELECT_MASK (0x7 << 3)
1866 1866
1867#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) 1867#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
1868#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) 1868#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
1869#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
1870#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
1871#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
1872#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
1869#define N_SCALAR(x) ((x) << 24) 1873#define N_SCALAR(x) ((x) << 24)
1870#define N_SCALAR_MASK (0x7F << 24) 1874#define N_SCALAR_MASK (0x7F << 24)
1871 1875
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 535caebd9813..c0cfe7ae2ba5 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
521 ssize_t ret; 521 ssize_t ret;
522 522
523 gpu = i915_first_error_state(i915); 523 gpu = i915_first_error_state(i915);
524 if (gpu) { 524 if (IS_ERR(gpu)) {
525 ret = PTR_ERR(gpu);
526 } else if (gpu) {
525 ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); 527 ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
526 i915_gpu_state_put(gpu); 528 i915_gpu_state_put(gpu);
527 } else { 529 } else {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f3e1d6a0b7dd..7edce1b7b348 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
494 { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ 494 { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
495}; 495};
496 496
497struct icl_combo_phy_ddi_buf_trans { 497/* icl_combo_phy_ddi_translations */
498 u32 dw2_swing_select; 498static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
499 u32 dw2_swing_scalar; 499 /* NT mV Trans mV db */
500 u32 dw4_scaling; 500 { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
501}; 501 { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
502 502 { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
503/* Voltage Swing Programming for VccIO 0.85V for DP */ 503 { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
504static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { 504 { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
505 /* Voltage mV db */ 505 { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
506 { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ 506 { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
507 { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ 507 { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
508 { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ 508 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
509 { 0x2, 0x98, 0x900F }, /* 400 9.5 */ 509 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
510 { 0xB, 0x70, 0x0018 }, /* 600 0.0 */
511 { 0xB, 0x70, 0x3015 }, /* 600 3.5 */
512 { 0xB, 0x70, 0x6012 }, /* 600 6.0 */
513 { 0x5, 0x00, 0x0018 }, /* 800 0.0 */
514 { 0x5, 0x00, 0x3015 }, /* 800 3.5 */
515 { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
516};
517
518/* FIXME - After table is updated in Bspec */
519/* Voltage Swing Programming for VccIO 0.85V for eDP */
520static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
521 /* Voltage mV db */
522 { 0x0, 0x00, 0x00 }, /* 200 0.0 */
523 { 0x0, 0x00, 0x00 }, /* 200 1.5 */
524 { 0x0, 0x00, 0x00 }, /* 200 4.0 */
525 { 0x0, 0x00, 0x00 }, /* 200 6.0 */
526 { 0x0, 0x00, 0x00 }, /* 250 0.0 */
527 { 0x0, 0x00, 0x00 }, /* 250 1.5 */
528 { 0x0, 0x00, 0x00 }, /* 250 4.0 */
529 { 0x0, 0x00, 0x00 }, /* 300 0.0 */
530 { 0x0, 0x00, 0x00 }, /* 300 1.5 */
531 { 0x0, 0x00, 0x00 }, /* 350 0.0 */
532};
533
534/* Voltage Swing Programming for VccIO 0.95V for DP */
535static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
536 /* Voltage mV db */
537 { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
538 { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
539 { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
540 { 0x2, 0x98, 0x900F }, /* 400 9.5 */
541 { 0x4, 0x98, 0x0018 }, /* 600 0.0 */
542 { 0x4, 0x98, 0x3015 }, /* 600 3.5 */
543 { 0x4, 0x98, 0x6012 }, /* 600 6.0 */
544 { 0x5, 0x76, 0x0018 }, /* 800 0.0 */
545 { 0x5, 0x76, 0x3015 }, /* 800 3.5 */
546 { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
547}; 510};
548 511
549/* FIXME - After table is updated in Bspec */ 512static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
550/* Voltage Swing Programming for VccIO 0.95V for eDP */ 513 /* NT mV Trans mV db */
551static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { 514 { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
552 /* Voltage mV db */ 515 { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
553 { 0x0, 0x00, 0x00 }, /* 200 0.0 */ 516 { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
554 { 0x0, 0x00, 0x00 }, /* 200 1.5 */ 517 { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
555 { 0x0, 0x00, 0x00 }, /* 200 4.0 */ 518 { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
556 { 0x0, 0x00, 0x00 }, /* 200 6.0 */ 519 { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
557 { 0x0, 0x00, 0x00 }, /* 250 0.0 */ 520 { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
558 { 0x0, 0x00, 0x00 }, /* 250 1.5 */ 521 { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
559 { 0x0, 0x00, 0x00 }, /* 250 4.0 */ 522 { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
560 { 0x0, 0x00, 0x00 }, /* 300 0.0 */ 523 { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
561 { 0x0, 0x00, 0x00 }, /* 300 1.5 */
562 { 0x0, 0x00, 0x00 }, /* 350 0.0 */
563}; 524};
564 525
565/* Voltage Swing Programming for VccIO 1.05V for DP */ 526static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
566static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { 527 /* NT mV Trans mV db */
567 /* Voltage mV db */ 528 { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
568 { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ 529 { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
569 { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ 530 { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
570 { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ 531 { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
571 { 0x2, 0x98, 0x900F }, /* 400 9.5 */ 532 { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
572 { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ 533 { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
573 { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ 534 { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
574 { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ 535 { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
575 { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ 536 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
576 { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ 537 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
577 { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
578}; 538};
579 539
580/* FIXME - After table is updated in Bspec */ 540static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
581/* Voltage Swing Programming for VccIO 1.05V for eDP */ 541 /* NT mV Trans mV db */
582static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { 542 { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
583 /* Voltage mV db */ 543 { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
584 { 0x0, 0x00, 0x00 }, /* 200 0.0 */ 544 { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
585 { 0x0, 0x00, 0x00 }, /* 200 1.5 */ 545 { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
586 { 0x0, 0x00, 0x00 }, /* 200 4.0 */ 546 { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
587 { 0x0, 0x00, 0x00 }, /* 200 6.0 */ 547 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
588 { 0x0, 0x00, 0x00 }, /* 250 0.0 */ 548 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
589 { 0x0, 0x00, 0x00 }, /* 250 1.5 */
590 { 0x0, 0x00, 0x00 }, /* 250 4.0 */
591 { 0x0, 0x00, 0x00 }, /* 300 0.0 */
592 { 0x0, 0x00, 0x00 }, /* 300 1.5 */
593 { 0x0, 0x00, 0x00 }, /* 350 0.0 */
594}; 549};
595 550
596struct icl_mg_phy_ddi_buf_trans { 551struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
871 } 826 }
872} 827}
873 828
874static const struct icl_combo_phy_ddi_buf_trans * 829static const struct cnl_ddi_buf_trans *
875icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, 830icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
876 int type, int *n_entries) 831 int type, int rate, int *n_entries)
877{ 832{
878 u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; 833 if (type == INTEL_OUTPUT_HDMI) {
879 834 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
880 if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { 835 return icl_combo_phy_ddi_translations_hdmi;
881 switch (voltage) { 836 } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
882 case VOLTAGE_INFO_0_85V: 837 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
883 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); 838 return icl_combo_phy_ddi_translations_edp_hbr3;
884 return icl_combo_phy_ddi_translations_edp_0_85V; 839 } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
885 case VOLTAGE_INFO_0_95V: 840 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
886 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); 841 return icl_combo_phy_ddi_translations_edp_hbr2;
887 return icl_combo_phy_ddi_translations_edp_0_95V;
888 case VOLTAGE_INFO_1_05V:
889 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
890 return icl_combo_phy_ddi_translations_edp_1_05V;
891 default:
892 MISSING_CASE(voltage);
893 return NULL;
894 }
895 } else {
896 switch (voltage) {
897 case VOLTAGE_INFO_0_85V:
898 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
899 return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
900 case VOLTAGE_INFO_0_95V:
901 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
902 return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
903 case VOLTAGE_INFO_1_05V:
904 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
905 return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
906 default:
907 MISSING_CASE(voltage);
908 return NULL;
909 }
910 } 842 }
843
844 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
845 return icl_combo_phy_ddi_translations_dp_hbr2;
911} 846}
912 847
913static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) 848static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
918 853
919 if (IS_ICELAKE(dev_priv)) { 854 if (IS_ICELAKE(dev_priv)) {
920 if (intel_port_is_combophy(dev_priv, port)) 855 if (intel_port_is_combophy(dev_priv, port))
921 icl_get_combo_buf_trans(dev_priv, port, 856 icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
922 INTEL_OUTPUT_HDMI, &n_entries); 857 0, &n_entries);
923 else 858 else
924 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); 859 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
925 default_entry = n_entries - 1; 860 default_entry = n_entries - 1;
@@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
1086 return DDI_CLK_SEL_TBT_810; 1021 return DDI_CLK_SEL_TBT_810;
1087 default: 1022 default:
1088 MISSING_CASE(clock); 1023 MISSING_CASE(clock);
1089 break; 1024 return DDI_CLK_SEL_NONE;
1090 } 1025 }
1091 case DPLL_ID_ICL_MGPLL1: 1026 case DPLL_ID_ICL_MGPLL1:
1092 case DPLL_ID_ICL_MGPLL2: 1027 case DPLL_ID_ICL_MGPLL2:
@@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
2275u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) 2210u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
2276{ 2211{
2277 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2212 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2213 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2278 enum port port = encoder->port; 2214 enum port port = encoder->port;
2279 int n_entries; 2215 int n_entries;
2280 2216
2281 if (IS_ICELAKE(dev_priv)) { 2217 if (IS_ICELAKE(dev_priv)) {
2282 if (intel_port_is_combophy(dev_priv, port)) 2218 if (intel_port_is_combophy(dev_priv, port))
2283 icl_get_combo_buf_trans(dev_priv, port, encoder->type, 2219 icl_get_combo_buf_trans(dev_priv, port, encoder->type,
2284 &n_entries); 2220 intel_dp->link_rate, &n_entries);
2285 else 2221 else
2286 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); 2222 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
2287 } else if (IS_CANNONLAKE(dev_priv)) { 2223 } else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
2462} 2398}
2463 2399
2464static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, 2400static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
2465 u32 level, enum port port, int type) 2401 u32 level, enum port port, int type,
2402 int rate)
2466{ 2403{
2467 const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; 2404 const struct cnl_ddi_buf_trans *ddi_translations = NULL;
2468 u32 n_entries, val; 2405 u32 n_entries, val;
2469 int ln; 2406 int ln;
2470 2407
2471 ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, 2408 ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
2472 &n_entries); 2409 rate, &n_entries);
2473 if (!ddi_translations) 2410 if (!ddi_translations)
2474 return; 2411 return;
2475 2412
@@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
2478 level = n_entries - 1; 2415 level = n_entries - 1;
2479 } 2416 }
2480 2417
2481 /* Set PORT_TX_DW5 Rterm Sel to 110b. */ 2418 /* Set PORT_TX_DW5 */
2482 val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); 2419 val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
2483 val &= ~RTERM_SELECT_MASK; 2420 val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
2421 TAP2_DISABLE | TAP3_DISABLE);
2422 val |= SCALING_MODE_SEL(0x2);
2484 val |= RTERM_SELECT(0x6); 2423 val |= RTERM_SELECT(0x6);
2485 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); 2424 val |= TAP3_DISABLE;
2486
2487 /* Program PORT_TX_DW5 */
2488 val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
2489 /* Set DisableTap2 and DisableTap3 if MIPI DSI
2490 * Clear DisableTap2 and DisableTap3 for all other Ports
2491 */
2492 if (type == INTEL_OUTPUT_DSI) {
2493 val |= TAP2_DISABLE;
2494 val |= TAP3_DISABLE;
2495 } else {
2496 val &= ~TAP2_DISABLE;
2497 val &= ~TAP3_DISABLE;
2498 }
2499 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); 2425 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
2500 2426
2501 /* Program PORT_TX_DW2 */ 2427 /* Program PORT_TX_DW2 */
2502 val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); 2428 val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
2503 val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | 2429 val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
2504 RCOMP_SCALAR_MASK); 2430 RCOMP_SCALAR_MASK);
2505 val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); 2431 val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
2506 val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); 2432 val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
2507 /* Program Rcomp scalar for every table entry */ 2433 /* Program Rcomp scalar for every table entry */
2508 val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); 2434 val |= RCOMP_SCALAR(0x98);
2509 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); 2435 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
2510 2436
2511 /* Program PORT_TX_DW4 */ 2437 /* Program PORT_TX_DW4 */
@@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
2514 val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); 2440 val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
2515 val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | 2441 val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
2516 CURSOR_COEFF_MASK); 2442 CURSOR_COEFF_MASK);
2517 val |= ddi_translations[level].dw4_scaling; 2443 val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
2444 val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
2445 val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
2518 I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); 2446 I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
2519 } 2447 }
2448
2449 /* Program PORT_TX_DW7 */
2450 val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
2451 val &= ~N_SCALAR_MASK;
2452 val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
2453 I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
2520} 2454}
2521 2455
2522static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, 2456static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
2581 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); 2515 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
2582 2516
2583 /* 5. Program swing and de-emphasis */ 2517 /* 5. Program swing and de-emphasis */
2584 icl_ddi_combo_vswing_program(dev_priv, level, port, type); 2518 icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
2585 2519
2586 /* 6. Set training enable to trigger update */ 2520 /* 6. Set training enable to trigger update */
2587 val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); 2521 val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3da9c0f9e948..248128126422 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15415 } 15415 }
15416} 15416}
15417 15417
15418static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
15419{
15420 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
15421
15422 /*
15423 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
15424 * the hardware when a high res displays plugged in. DPLL P
15425 * divider is zero, and the pipe timings are bonkers. We'll
15426 * try to disable everything in that case.
15427 *
15428 * FIXME would be nice to be able to sanitize this state
15429 * without several WARNs, but for now let's take the easy
15430 * road.
15431 */
15432 return IS_GEN6(dev_priv) &&
15433 crtc_state->base.active &&
15434 crtc_state->shared_dpll &&
15435 crtc_state->port_clock == 0;
15436}
15437
15418static void intel_sanitize_encoder(struct intel_encoder *encoder) 15438static void intel_sanitize_encoder(struct intel_encoder *encoder)
15419{ 15439{
15420 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 15440 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
15421 struct intel_connector *connector; 15441 struct intel_connector *connector;
15442 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
15443 struct intel_crtc_state *crtc_state = crtc ?
15444 to_intel_crtc_state(crtc->base.state) : NULL;
15422 15445
15423 /* We need to check both for a crtc link (meaning that the 15446 /* We need to check both for a crtc link (meaning that the
15424 * encoder is active and trying to read from a pipe) and the 15447 * encoder is active and trying to read from a pipe) and the
15425 * pipe itself being active. */ 15448 * pipe itself being active. */
15426 bool has_active_crtc = encoder->base.crtc && 15449 bool has_active_crtc = crtc_state &&
15427 to_intel_crtc(encoder->base.crtc)->active; 15450 crtc_state->base.active;
15451
15452 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
15453 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
15454 pipe_name(crtc->pipe));
15455 has_active_crtc = false;
15456 }
15428 15457
15429 connector = intel_encoder_find_connector(encoder); 15458 connector = intel_encoder_find_connector(encoder);
15430 if (connector && !has_active_crtc) { 15459 if (connector && !has_active_crtc) {
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15435 /* Connector is active, but has no active pipe. This is 15464 /* Connector is active, but has no active pipe. This is
15436 * fallout from our resume register restoring. Disable 15465 * fallout from our resume register restoring. Disable
15437 * the encoder manually again. */ 15466 * the encoder manually again. */
15438 if (encoder->base.crtc) { 15467 if (crtc_state) {
15439 struct drm_crtc_state *crtc_state = encoder->base.crtc->state; 15468 struct drm_encoder *best_encoder;
15440 15469
15441 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 15470 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15442 encoder->base.base.id, 15471 encoder->base.base.id,
15443 encoder->base.name); 15472 encoder->base.name);
15473
15474 /* avoid oopsing in case the hooks consult best_encoder */
15475 best_encoder = connector->base.state->best_encoder;
15476 connector->base.state->best_encoder = &encoder->base;
15477
15444 if (encoder->disable) 15478 if (encoder->disable)
15445 encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15479 encoder->disable(encoder, crtc_state,
15480 connector->base.state);
15446 if (encoder->post_disable) 15481 if (encoder->post_disable)
15447 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15482 encoder->post_disable(encoder, crtc_state,
15483 connector->base.state);
15484
15485 connector->base.state->best_encoder = best_encoder;
15448 } 15486 }
15449 encoder->base.crtc = NULL; 15487 encoder->base.crtc = NULL;
15450 15488
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fdd2cbc56fa3..22a74608c6e4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
304static int icl_max_source_rate(struct intel_dp *intel_dp) 304static int icl_max_source_rate(struct intel_dp *intel_dp)
305{ 305{
306 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 306 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
307 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
307 enum port port = dig_port->base.port; 308 enum port port = dig_port->base.port;
308 309
309 if (port == PORT_B) 310 if (intel_port_is_combophy(dev_priv, port) &&
311 !intel_dp_is_edp(intel_dp))
310 return 540000; 312 return 540000;
311 313
312 return 810000; 314 return 810000;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f94a04b4ad87..e9ddeaf05a14 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,16 @@ struct intel_fbdev {
209 unsigned long vma_flags; 209 unsigned long vma_flags;
210 async_cookie_t cookie; 210 async_cookie_t cookie;
211 int preferred_bpp; 211 int preferred_bpp;
212
213 /* Whether or not fbdev hpd processing is temporarily suspended */
214 bool hpd_suspended : 1;
215 /* Set when a hotplug was received while HPD processing was
216 * suspended
217 */
218 bool hpd_waiting : 1;
219
220 /* Protects hpd_suspended */
221 struct mutex hpd_lock;
212}; 222};
213 223
214struct intel_encoder { 224struct intel_encoder {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb5bb5b32a60..4ee16b264dbe 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
336 bool *enabled, int width, int height) 336 bool *enabled, int width, int height)
337{ 337{
338 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); 338 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
339 unsigned long conn_configured, conn_seq, mask;
340 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); 339 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
340 unsigned long conn_configured, conn_seq;
341 int i, j; 341 int i, j;
342 bool *save_enabled; 342 bool *save_enabled;
343 bool fallback = true, ret = true; 343 bool fallback = true, ret = true;
@@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
355 drm_modeset_backoff(&ctx); 355 drm_modeset_backoff(&ctx);
356 356
357 memcpy(save_enabled, enabled, count); 357 memcpy(save_enabled, enabled, count);
358 mask = GENMASK(count - 1, 0); 358 conn_seq = GENMASK(count - 1, 0);
359 conn_configured = 0; 359 conn_configured = 0;
360retry: 360retry:
361 conn_seq = conn_configured;
362 for (i = 0; i < count; i++) { 361 for (i = 0; i < count; i++) {
363 struct drm_fb_helper_connector *fb_conn; 362 struct drm_fb_helper_connector *fb_conn;
364 struct drm_connector *connector; 363 struct drm_connector *connector;
@@ -371,7 +370,8 @@ retry:
371 if (conn_configured & BIT(i)) 370 if (conn_configured & BIT(i))
372 continue; 371 continue;
373 372
374 if (conn_seq == 0 && !connector->has_tile) 373 /* First pass, only consider tiled connectors */
374 if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
375 continue; 375 continue;
376 376
377 if (connector->status == connector_status_connected) 377 if (connector->status == connector_status_connected)
@@ -475,8 +475,10 @@ retry:
475 conn_configured |= BIT(i); 475 conn_configured |= BIT(i);
476 } 476 }
477 477
478 if ((conn_configured & mask) != mask && conn_configured != conn_seq) 478 if (conn_configured != conn_seq) { /* repeat until no more are found */
479 conn_seq = conn_configured;
479 goto retry; 480 goto retry;
481 }
480 482
481 /* 483 /*
482 * If the BIOS didn't enable everything it could, fall back to have the 484 * If the BIOS didn't enable everything it could, fall back to have the
@@ -679,6 +681,7 @@ int intel_fbdev_init(struct drm_device *dev)
679 if (ifbdev == NULL) 681 if (ifbdev == NULL)
680 return -ENOMEM; 682 return -ENOMEM;
681 683
684 mutex_init(&ifbdev->hpd_lock);
682 drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); 685 drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
683 686
684 if (!intel_fbdev_init_bios(dev, ifbdev)) 687 if (!intel_fbdev_init_bios(dev, ifbdev))
@@ -752,6 +755,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
752 intel_fbdev_destroy(ifbdev); 755 intel_fbdev_destroy(ifbdev);
753} 756}
754 757
758/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
759 * processing, fbdev will perform a full connector reprobe if a hotplug event
760 * was received while HPD was suspended.
761 */
762static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
763{
764 bool send_hpd = false;
765
766 mutex_lock(&ifbdev->hpd_lock);
767 ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
768 send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
769 ifbdev->hpd_waiting = false;
770 mutex_unlock(&ifbdev->hpd_lock);
771
772 if (send_hpd) {
773 DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
774 drm_fb_helper_hotplug_event(&ifbdev->helper);
775 }
776}
777
755void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) 778void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
756{ 779{
757 struct drm_i915_private *dev_priv = to_i915(dev); 780 struct drm_i915_private *dev_priv = to_i915(dev);
@@ -773,6 +796,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
773 */ 796 */
774 if (state != FBINFO_STATE_RUNNING) 797 if (state != FBINFO_STATE_RUNNING)
775 flush_work(&dev_priv->fbdev_suspend_work); 798 flush_work(&dev_priv->fbdev_suspend_work);
799
776 console_lock(); 800 console_lock();
777 } else { 801 } else {
778 /* 802 /*
@@ -800,17 +824,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
800 824
801 drm_fb_helper_set_suspend(&ifbdev->helper, state); 825 drm_fb_helper_set_suspend(&ifbdev->helper, state);
802 console_unlock(); 826 console_unlock();
827
828 intel_fbdev_hpd_set_suspend(ifbdev, state);
803} 829}
804 830
805void intel_fbdev_output_poll_changed(struct drm_device *dev) 831void intel_fbdev_output_poll_changed(struct drm_device *dev)
806{ 832{
807 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; 833 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
834 bool send_hpd;
808 835
809 if (!ifbdev) 836 if (!ifbdev)
810 return; 837 return;
811 838
812 intel_fbdev_sync(ifbdev); 839 intel_fbdev_sync(ifbdev);
813 if (ifbdev->vma || ifbdev->helper.deferred_setup) 840
841 mutex_lock(&ifbdev->hpd_lock);
842 send_hpd = !ifbdev->hpd_suspended;
843 ifbdev->hpd_waiting = true;
844 mutex_unlock(&ifbdev->hpd_lock);
845
846 if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
814 drm_fb_helper_hotplug_event(&ifbdev->helper); 847 drm_fb_helper_hotplug_event(&ifbdev->helper);
815} 848}
816 849
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4be167dcd209..eab9341a5152 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
303 */ 303 */
304 if (!(prio & I915_PRIORITY_NEWCLIENT)) { 304 if (!(prio & I915_PRIORITY_NEWCLIENT)) {
305 prio |= I915_PRIORITY_NEWCLIENT; 305 prio |= I915_PRIORITY_NEWCLIENT;
306 active->sched.attr.priority = prio;
306 list_move_tail(&active->sched.link, 307 list_move_tail(&active->sched.link,
307 i915_sched_lookup_priolist(engine, prio)); 308 i915_sched_lookup_priolist(engine, prio));
308 } 309 }
@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
645 int i; 646 int i;
646 647
647 priolist_for_each_request_consume(rq, rn, p, i) { 648 priolist_for_each_request_consume(rq, rn, p, i) {
649 GEM_BUG_ON(last &&
650 need_preempt(engine, last, rq_prio(rq)));
651
648 /* 652 /*
649 * Can we combine this request with the current port? 653 * Can we combine this request with the current port?
650 * It has to be the same context/ringbuffer and not 654 * It has to be the same context/ringbuffer and not
@@ -2244,6 +2248,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
2244 if (ret) 2248 if (ret)
2245 return ret; 2249 return ret;
2246 2250
2251 intel_engine_init_workarounds(engine);
2252
2247 if (HAS_LOGICAL_RING_ELSQ(i915)) { 2253 if (HAS_LOGICAL_RING_ELSQ(i915)) {
2248 execlists->submit_reg = i915->regs + 2254 execlists->submit_reg = i915->regs +
2249 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); 2255 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2310,7 +2316,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
2310 } 2316 }
2311 2317
2312 intel_engine_init_whitelist(engine); 2318 intel_engine_init_whitelist(engine);
2313 intel_engine_init_workarounds(engine);
2314 2319
2315 return 0; 2320 return 0;
2316} 2321}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b8f106d9ecf8..3ac20153705a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -55,7 +55,12 @@
55struct opregion_header { 55struct opregion_header {
56 u8 signature[16]; 56 u8 signature[16];
57 u32 size; 57 u32 size;
58 u32 opregion_ver; 58 struct {
59 u8 rsvd;
60 u8 revision;
61 u8 minor;
62 u8 major;
63 } __packed over;
59 u8 bios_ver[32]; 64 u8 bios_ver[32];
60 u8 vbios_ver[16]; 65 u8 vbios_ver[16];
61 u8 driver_ver[16]; 66 u8 driver_ver[16];
@@ -119,7 +124,8 @@ struct opregion_asle {
119 u64 fdss; 124 u64 fdss;
120 u32 fdsp; 125 u32 fdsp;
121 u32 stat; 126 u32 stat;
122 u64 rvda; /* Physical address of raw vbt data */ 127 u64 rvda; /* Physical (2.0) or relative from opregion (2.1+)
128 * address of raw VBT data. */
123 u32 rvds; /* Size of raw vbt data */ 129 u32 rvds; /* Size of raw vbt data */
124 u8 rsvd[58]; 130 u8 rsvd[58];
125} __packed; 131} __packed;
@@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
925 opregion->header = base; 931 opregion->header = base;
926 opregion->lid_state = base + ACPI_CLID; 932 opregion->lid_state = base + ACPI_CLID;
927 933
934 DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
935 opregion->header->over.major,
936 opregion->header->over.minor,
937 opregion->header->over.revision);
938
928 mboxes = opregion->header->mboxes; 939 mboxes = opregion->header->mboxes;
929 if (mboxes & MBOX_ACPI) { 940 if (mboxes & MBOX_ACPI) {
930 DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); 941 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
@@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
953 if (dmi_check_system(intel_no_opregion_vbt)) 964 if (dmi_check_system(intel_no_opregion_vbt))
954 goto out; 965 goto out;
955 966
956 if (opregion->header->opregion_ver >= 2 && opregion->asle && 967 if (opregion->header->over.major >= 2 && opregion->asle &&
957 opregion->asle->rvda && opregion->asle->rvds) { 968 opregion->asle->rvda && opregion->asle->rvds) {
958 opregion->rvda = memremap(opregion->asle->rvda, 969 resource_size_t rvda = opregion->asle->rvda;
959 opregion->asle->rvds, 970
971 /*
972 * opregion 2.0: rvda is the physical VBT address.
973 *
974 * opregion 2.1+: rvda is unsigned, relative offset from
975 * opregion base, and should never point within opregion.
976 */
977 if (opregion->header->over.major > 2 ||
978 opregion->header->over.minor >= 1) {
979 WARN_ON(rvda < OPREGION_SIZE);
980
981 rvda += asls;
982 }
983
984 opregion->rvda = memremap(rvda, opregion->asle->rvds,
960 MEMREMAP_WB); 985 MEMREMAP_WB);
986
961 vbt = opregion->rvda; 987 vbt = opregion->rvda;
962 vbt_size = opregion->asle->rvds; 988 vbt_size = opregion->asle->rvds;
963 if (intel_bios_is_valid_vbt(vbt, vbt_size)) { 989 if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
@@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
967 goto out; 993 goto out;
968 } else { 994 } else {
969 DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); 995 DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
996 memunmap(opregion->rvda);
997 opregion->rvda = NULL;
970 } 998 }
971 } 999 }
972 1000
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 419e56342523..f71970df9936 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -274,10 +274,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
274 DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", 274 DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
275 intel_dp->psr_dpcd[0]); 275 intel_dp->psr_dpcd[0]);
276 276
277 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
278 DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
279 return;
280 }
281
277 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { 282 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
278 DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); 283 DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
279 return; 284 return;
280 } 285 }
286
281 dev_priv->psr.sink_support = true; 287 dev_priv->psr.sink_support = true;
282 dev_priv->psr.sink_sync_latency = 288 dev_priv->psr.sink_sync_latency =
283 intel_dp_get_sink_sync_latency(intel_dp); 289 intel_dp_get_sink_sync_latency(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 72edaa7ff411..a1a7cc29fdd1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -415,16 +415,17 @@ struct intel_engine_cs {
415 /** 415 /**
416 * @enable_count: Reference count for the enabled samplers. 416 * @enable_count: Reference count for the enabled samplers.
417 * 417 *
418 * Index number corresponds to the bit number from @enable. 418 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
419 */ 419 */
420 unsigned int enable_count[I915_PMU_SAMPLE_BITS]; 420 unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
421 /** 421 /**
422 * @sample: Counter values for sampling events. 422 * @sample: Counter values for sampling events.
423 * 423 *
424 * Our internal timer stores the current counters in this field. 424 * Our internal timer stores the current counters in this field.
425 *
426 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
425 */ 427 */
426#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) 428 struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
427 struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
428 } pmu; 429 } pmu;
429 430
430 /* 431 /*
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d2e003d8f3db..5170a0f5fe7b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane,
494 494
495 keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); 495 keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
496 496
497 keymsk = key->channel_mask & 0x3ffffff; 497 keymsk = key->channel_mask & 0x7ffffff;
498 if (alpha < 0xff) 498 if (alpha < 0xff)
499 keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; 499 keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
500 500
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 2c5bbe317353..e31e263cf86b 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
643 int bus_format; 643 int bus_format;
644 644
645 ret = of_property_read_u32(child, "reg", &i); 645 ret = of_property_read_u32(child, "reg", &i);
646 if (ret || i < 0 || i > 1) 646 if (ret || i < 0 || i > 1) {
647 return -EINVAL; 647 ret = -EINVAL;
648 goto free_child;
649 }
648 650
649 if (!of_device_is_available(child)) 651 if (!of_device_is_available(child))
650 continue; 652 continue;
@@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
657 channel = &imx_ldb->channel[i]; 659 channel = &imx_ldb->channel[i];
658 channel->ldb = imx_ldb; 660 channel->ldb = imx_ldb;
659 channel->chno = i; 661 channel->chno = i;
660 channel->child = child;
661 662
662 /* 663 /*
663 * The output port is port@4 with an external 4-port mux or 664 * The output port is port@4 with an external 4-port mux or
@@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
667 imx_ldb->lvds_mux ? 4 : 2, 0, 668 imx_ldb->lvds_mux ? 4 : 2, 0,
668 &channel->panel, &channel->bridge); 669 &channel->panel, &channel->bridge);
669 if (ret && ret != -ENODEV) 670 if (ret && ret != -ENODEV)
670 return ret; 671 goto free_child;
671 672
672 /* panel ddc only if there is no bridge */ 673 /* panel ddc only if there is no bridge */
673 if (!channel->bridge) { 674 if (!channel->bridge) {
674 ret = imx_ldb_panel_ddc(dev, channel, child); 675 ret = imx_ldb_panel_ddc(dev, channel, child);
675 if (ret) 676 if (ret)
676 return ret; 677 goto free_child;
677 } 678 }
678 679
679 bus_format = of_get_bus_format(dev, child); 680 bus_format = of_get_bus_format(dev, child);
@@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
689 if (bus_format < 0) { 690 if (bus_format < 0) {
690 dev_err(dev, "could not determine data mapping: %d\n", 691 dev_err(dev, "could not determine data mapping: %d\n",
691 bus_format); 692 bus_format);
692 return bus_format; 693 ret = bus_format;
694 goto free_child;
693 } 695 }
694 channel->bus_format = bus_format; 696 channel->bus_format = bus_format;
697 channel->child = child;
695 698
696 ret = imx_ldb_register(drm, channel); 699 ret = imx_ldb_register(drm, channel);
697 if (ret) 700 if (ret) {
698 return ret; 701 channel->child = NULL;
702 goto free_child;
703 }
699 } 704 }
700 705
701 dev_set_drvdata(dev, imx_ldb); 706 dev_set_drvdata(dev, imx_ldb);
702 707
703 return 0; 708 return 0;
709
710free_child:
711 of_node_put(child);
712 return ret;
704} 713}
705 714
706static void imx_ldb_unbind(struct device *dev, struct device *master, 715static void imx_ldb_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index c390924de93d..21e964f6ab5c 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
370 if (ret) 370 if (ret)
371 return ret; 371 return ret;
372 372
373 /* CRTC should be enabled */ 373 /* nothing to check when disabling or disabled */
374 if (!crtc_state->enable) 374 if (!crtc_state->enable)
375 return -EINVAL; 375 return 0;
376 376
377 switch (plane->type) { 377 switch (plane->type) {
378 case DRM_PLANE_TYPE_PRIMARY: 378 case DRM_PLANE_TYPE_PRIMARY:
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 75d97f1b2e8f..4f5c67f70c4d 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -46,7 +46,6 @@ struct meson_crtc {
46 struct drm_crtc base; 46 struct drm_crtc base;
47 struct drm_pending_vblank_event *event; 47 struct drm_pending_vblank_event *event;
48 struct meson_drm *priv; 48 struct meson_drm *priv;
49 bool enabled;
50}; 49};
51#define to_meson_crtc(x) container_of(x, struct meson_crtc, base) 50#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
52 51
@@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
82 81
83}; 82};
84 83
85static void meson_crtc_enable(struct drm_crtc *crtc) 84static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
85 struct drm_crtc_state *old_state)
86{ 86{
87 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 87 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
88 struct drm_crtc_state *crtc_state = crtc->state; 88 struct drm_crtc_state *crtc_state = crtc->state;
@@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
108 108
109 drm_crtc_vblank_on(crtc); 109 drm_crtc_vblank_on(crtc);
110 110
111 meson_crtc->enabled = true;
112}
113
114static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
115 struct drm_crtc_state *old_state)
116{
117 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
118 struct meson_drm *priv = meson_crtc->priv;
119
120 DRM_DEBUG_DRIVER("\n");
121
122 if (!meson_crtc->enabled)
123 meson_crtc_enable(crtc);
124
125 priv->viu.osd1_enabled = true; 111 priv->viu.osd1_enabled = true;
126} 112}
127 113
@@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
153 139
154 crtc->state->event = NULL; 140 crtc->state->event = NULL;
155 } 141 }
156
157 meson_crtc->enabled = false;
158} 142}
159 143
160static void meson_crtc_atomic_begin(struct drm_crtc *crtc, 144static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
163 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 147 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
164 unsigned long flags; 148 unsigned long flags;
165 149
166 if (crtc->state->enable && !meson_crtc->enabled)
167 meson_crtc_enable(crtc);
168
169 if (crtc->state->event) { 150 if (crtc->state->event) {
170 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 151 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
171 152
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 3ee4d4a4ecba..12ff47b13668 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
75 .fb_create = drm_gem_fb_create, 75 .fb_create = drm_gem_fb_create,
76}; 76};
77 77
78static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
79 .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
80};
81
78static irqreturn_t meson_irq(int irq, void *arg) 82static irqreturn_t meson_irq(int irq, void *arg)
79{ 83{
80 struct drm_device *dev = arg; 84 struct drm_device *dev = arg;
@@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
266 drm->mode_config.max_width = 3840; 270 drm->mode_config.max_width = 3840;
267 drm->mode_config.max_height = 2160; 271 drm->mode_config.max_height = 2160;
268 drm->mode_config.funcs = &meson_mode_config_funcs; 272 drm->mode_config.funcs = &meson_mode_config_funcs;
273 drm->mode_config.helper_private = &meson_mode_config_helpers;
269 274
270 /* Hardware Initialization */ 275 /* Hardware Initialization */
271 276
@@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev,
388 remote_node = of_graph_get_remote_port_parent(ep); 393 remote_node = of_graph_get_remote_port_parent(ep);
389 if (!remote_node || 394 if (!remote_node ||
390 remote_node == parent || /* Ignore parent endpoint */ 395 remote_node == parent || /* Ignore parent endpoint */
391 !of_device_is_available(remote_node)) 396 !of_device_is_available(remote_node)) {
397 of_node_put(remote_node);
392 continue; 398 continue;
399 }
393 400
394 count += meson_probe_remote(pdev, match, remote, remote_node); 401 count += meson_probe_remote(pdev, match, remote, remote_node);
395 402
@@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev)
408 415
409 for_each_endpoint_of_node(np, ep) { 416 for_each_endpoint_of_node(np, ep) {
410 remote = of_graph_get_remote_port_parent(ep); 417 remote = of_graph_get_remote_port_parent(ep);
411 if (!remote || !of_device_is_available(remote)) 418 if (!remote || !of_device_is_available(remote)) {
419 of_node_put(remote);
412 continue; 420 continue;
421 }
413 422
414 count += meson_probe_remote(pdev, &match, np, remote); 423 count += meson_probe_remote(pdev, &match, np, remote);
424 of_node_put(remote);
415 } 425 }
416 426
417 if (count && !match) 427 if (count && !match)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 5beb83d1cf87..ce1b3cc4bf6d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -944,7 +944,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
944 np = dev_pm_opp_get_of_node(opp); 944 np = dev_pm_opp_get_of_node(opp);
945 945
946 if (np) { 946 if (np) {
947 of_property_read_u32(np, "qcom,level", &val); 947 of_property_read_u32(np, "opp-level", &val);
948 of_node_put(np); 948 of_node_put(np);
949 } 949 }
950 950
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2e4372ef17a3..2cfee1a4fe0b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -765,7 +765,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
765 adreno_gpu->rev = config->rev; 765 adreno_gpu->rev = config->rev;
766 766
767 adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; 767 adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
768 adreno_gpu_config.irqname = "kgsl_3d0_irq";
769 768
770 adreno_gpu_config.va_start = SZ_16M; 769 adreno_gpu_config.va_start = SZ_16M;
771 adreno_gpu_config.va_end = 0xffffffff; 770 adreno_gpu_config.va_end = 0xffffffff;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index fd75870eb17f..6aefcd6db46b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
365 &pdpu->pipe_qos_cfg); 365 &pdpu->pipe_qos_cfg);
366} 366}
367 367
368static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
369{
370 struct dpu_plane *pdpu = to_dpu_plane(plane);
371 struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
372
373 if (!pdpu->is_rt_pipe)
374 return;
375
376 pm_runtime_get_sync(&dpu_kms->pdev->dev);
377 _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
378 pm_runtime_put_sync(&dpu_kms->pdev->dev);
379}
380
381/** 368/**
382 * _dpu_plane_set_ot_limit - set OT limit for the given plane 369 * _dpu_plane_set_ot_limit - set OT limit for the given plane
383 * @plane: Pointer to drm plane 370 * @plane: Pointer to drm plane
@@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane)
1248} 1235}
1249 1236
1250#ifdef CONFIG_DEBUG_FS 1237#ifdef CONFIG_DEBUG_FS
1238static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
1239{
1240 struct dpu_plane *pdpu = to_dpu_plane(plane);
1241 struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
1242
1243 if (!pdpu->is_rt_pipe)
1244 return;
1245
1246 pm_runtime_get_sync(&dpu_kms->pdev->dev);
1247 _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
1248 pm_runtime_put_sync(&dpu_kms->pdev->dev);
1249}
1250
1251static ssize_t _dpu_plane_danger_read(struct file *file, 1251static ssize_t _dpu_plane_danger_read(struct file *file,
1252 char __user *buff, size_t count, loff_t *ppos) 1252 char __user *buff, size_t count, loff_t *ppos)
1253{ 1253{
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9cd6a96c6bf2..927e5d86f7c1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
250void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, 250void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
251 struct msm_gem_vma *vma); 251 struct msm_gem_vma *vma);
252int msm_gem_map_vma(struct msm_gem_address_space *aspace, 252int msm_gem_map_vma(struct msm_gem_address_space *aspace,
253 struct msm_gem_vma *vma, struct sg_table *sgt, int npages); 253 struct msm_gem_vma *vma, int prot,
254 struct sg_table *sgt, int npages);
254void msm_gem_close_vma(struct msm_gem_address_space *aspace, 255void msm_gem_close_vma(struct msm_gem_address_space *aspace,
255 struct msm_gem_vma *vma); 256 struct msm_gem_vma *vma);
256 257
@@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
333struct drm_gem_object *msm_gem_import(struct drm_device *dev, 334struct drm_gem_object *msm_gem_import(struct drm_device *dev,
334 struct dma_buf *dmabuf, struct sg_table *sgt); 335 struct dma_buf *dmabuf, struct sg_table *sgt);
335 336
337__printf(2, 3)
336void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); 338void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
337 339
338int msm_framebuffer_prepare(struct drm_framebuffer *fb, 340int msm_framebuffer_prepare(struct drm_framebuffer *fb,
@@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
396int msm_debugfs_late_init(struct drm_device *dev); 398int msm_debugfs_late_init(struct drm_device *dev);
397int msm_rd_debugfs_init(struct drm_minor *minor); 399int msm_rd_debugfs_init(struct drm_minor *minor);
398void msm_rd_debugfs_cleanup(struct msm_drm_private *priv); 400void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
401__printf(3, 4)
399void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, 402void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
400 const char *fmt, ...); 403 const char *fmt, ...);
401int msm_perf_debugfs_init(struct drm_minor *minor); 404int msm_perf_debugfs_init(struct drm_minor *minor);
402void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); 405void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
403#else 406#else
404static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } 407static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
408__printf(3, 4)
405static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, 409static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
406 const char *fmt, ...) {} 410 const char *fmt, ...) {}
407static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {} 411static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 51a95da694d8..c8886d3071fa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
391 struct msm_gem_object *msm_obj = to_msm_bo(obj); 391 struct msm_gem_object *msm_obj = to_msm_bo(obj);
392 struct msm_gem_vma *vma; 392 struct msm_gem_vma *vma;
393 struct page **pages; 393 struct page **pages;
394 int prot = IOMMU_READ;
395
396 if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
397 prot |= IOMMU_WRITE;
394 398
395 WARN_ON(!mutex_is_locked(&msm_obj->lock)); 399 WARN_ON(!mutex_is_locked(&msm_obj->lock));
396 400
@@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
405 if (IS_ERR(pages)) 409 if (IS_ERR(pages))
406 return PTR_ERR(pages); 410 return PTR_ERR(pages);
407 411
408 return msm_gem_map_vma(aspace, vma, msm_obj->sgt, 412 return msm_gem_map_vma(aspace, vma, prot,
409 obj->size >> PAGE_SHIFT); 413 msm_obj->sgt, obj->size >> PAGE_SHIFT);
410} 414}
411 415
412/* get iova and pin it. Should have a matching put */ 416/* get iova and pin it. Should have a matching put */
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 557360788084..49c04829cf34 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
68 68
69int 69int
70msm_gem_map_vma(struct msm_gem_address_space *aspace, 70msm_gem_map_vma(struct msm_gem_address_space *aspace,
71 struct msm_gem_vma *vma, struct sg_table *sgt, int npages) 71 struct msm_gem_vma *vma, int prot,
72 struct sg_table *sgt, int npages)
72{ 73{
73 unsigned size = npages << PAGE_SHIFT; 74 unsigned size = npages << PAGE_SHIFT;
74 int ret = 0; 75 int ret = 0;
@@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
86 87
87 if (aspace->mmu) 88 if (aspace->mmu)
88 ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, 89 ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
89 size, IOMMU_READ | IOMMU_WRITE); 90 size, prot);
90 91
91 if (ret) 92 if (ret)
92 vma->mapped = false; 93 vma->mapped = false;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5f3eff304355..10babd18e286 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
900 } 900 }
901 901
902 /* Get Interrupt: */ 902 /* Get Interrupt: */
903 gpu->irq = platform_get_irq_byname(pdev, config->irqname); 903 gpu->irq = platform_get_irq(pdev, 0);
904 if (gpu->irq < 0) { 904 if (gpu->irq < 0) {
905 ret = gpu->irq; 905 ret = gpu->irq;
906 DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret); 906 DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index efb49bb64191..ca17086f72c9 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -31,7 +31,6 @@ struct msm_gpu_state;
31 31
32struct msm_gpu_config { 32struct msm_gpu_config {
33 const char *ioname; 33 const char *ioname;
34 const char *irqname;
35 uint64_t va_start; 34 uint64_t va_start;
36 uint64_t va_end; 35 uint64_t va_end;
37 unsigned int nr_rings; 36 unsigned int nr_rings;
@@ -63,7 +62,7 @@ struct msm_gpu_funcs {
63 struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); 62 struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
64 void (*recover)(struct msm_gpu *gpu); 63 void (*recover)(struct msm_gpu *gpu);
65 void (*destroy)(struct msm_gpu *gpu); 64 void (*destroy)(struct msm_gpu *gpu);
66#ifdef CONFIG_DEBUG_FS 65#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
67 /* show GPU status in debugfs: */ 66 /* show GPU status in debugfs: */
68 void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, 67 void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
69 struct drm_printer *p); 68 struct drm_printer *p);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 90e9d0a48dc0..d21172933d92 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
115 char *fptr = &fifo->buf[fifo->head]; 115 char *fptr = &fifo->buf[fifo->head];
116 int n; 116 int n;
117 117
118 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); 118 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
119 if (!rd->open)
120 return;
119 121
120 /* Note that smp_load_acquire() is not strictly required 122 /* Note that smp_load_acquire() is not strictly required
121 * as CIRC_SPACE_TO_END() does not access the tail more 123 * as CIRC_SPACE_TO_END() does not access the tail more
@@ -213,7 +215,10 @@ out:
213static int rd_release(struct inode *inode, struct file *file) 215static int rd_release(struct inode *inode, struct file *file)
214{ 216{
215 struct msm_rd_state *rd = inode->i_private; 217 struct msm_rd_state *rd = inode->i_private;
218
216 rd->open = false; 219 rd->open = false;
220 wake_up_all(&rd->fifo_event);
221
217 return 0; 222 return 0;
218} 223}
219 224
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 5f5be6368aed..c7a94c94dbf3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -253,6 +253,9 @@ nouveau_backlight_init(struct drm_connector *connector)
253 case NV_DEVICE_INFO_V0_FERMI: 253 case NV_DEVICE_INFO_V0_FERMI:
254 case NV_DEVICE_INFO_V0_KEPLER: 254 case NV_DEVICE_INFO_V0_KEPLER:
255 case NV_DEVICE_INFO_V0_MAXWELL: 255 case NV_DEVICE_INFO_V0_MAXWELL:
256 case NV_DEVICE_INFO_V0_PASCAL:
257 case NV_DEVICE_INFO_V0_VOLTA:
258 case NV_DEVICE_INFO_V0_TURING:
256 ret = nv50_backlight_init(nv_encoder, &props, &ops); 259 ret = nv50_backlight_init(nv_encoder, &props, &ops);
257 break; 260 break;
258 default: 261 default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index bfbc9341e0c2..d9edb5785813 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2435,6 +2435,38 @@ nv140_chipset = {
2435}; 2435};
2436 2436
2437static const struct nvkm_device_chip 2437static const struct nvkm_device_chip
2438nv162_chipset = {
2439 .name = "TU102",
2440 .bar = tu104_bar_new,
2441 .bios = nvkm_bios_new,
2442 .bus = gf100_bus_new,
2443 .devinit = tu104_devinit_new,
2444 .fault = tu104_fault_new,
2445 .fb = gv100_fb_new,
2446 .fuse = gm107_fuse_new,
2447 .gpio = gk104_gpio_new,
2448 .i2c = gm200_i2c_new,
2449 .ibus = gm200_ibus_new,
2450 .imem = nv50_instmem_new,
2451 .ltc = gp102_ltc_new,
2452 .mc = tu104_mc_new,
2453 .mmu = tu104_mmu_new,
2454 .pci = gp100_pci_new,
2455 .pmu = gp102_pmu_new,
2456 .therm = gp100_therm_new,
2457 .timer = gk20a_timer_new,
2458 .top = gk104_top_new,
2459 .ce[0] = tu104_ce_new,
2460 .ce[1] = tu104_ce_new,
2461 .ce[2] = tu104_ce_new,
2462 .ce[3] = tu104_ce_new,
2463 .ce[4] = tu104_ce_new,
2464 .disp = tu104_disp_new,
2465 .dma = gv100_dma_new,
2466 .fifo = tu104_fifo_new,
2467};
2468
2469static const struct nvkm_device_chip
2438nv164_chipset = { 2470nv164_chipset = {
2439 .name = "TU104", 2471 .name = "TU104",
2440 .bar = tu104_bar_new, 2472 .bar = tu104_bar_new,
@@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2950 case 0x138: device->chip = &nv138_chipset; break; 2982 case 0x138: device->chip = &nv138_chipset; break;
2951 case 0x13b: device->chip = &nv13b_chipset; break; 2983 case 0x13b: device->chip = &nv13b_chipset; break;
2952 case 0x140: device->chip = &nv140_chipset; break; 2984 case 0x140: device->chip = &nv140_chipset; break;
2985 case 0x162: device->chip = &nv162_chipset; break;
2953 case 0x164: device->chip = &nv164_chipset; break; 2986 case 0x164: device->chip = &nv164_chipset; break;
2954 case 0x166: device->chip = &nv166_chipset; break; 2987 case 0x166: device->chip = &nv166_chipset; break;
2955 default: 2988 default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 816ccaedfc73..8675613e142b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -22,6 +22,7 @@
22#include <engine/falcon.h> 22#include <engine/falcon.h>
23 23
24#include <core/gpuobj.h> 24#include <core/gpuobj.h>
25#include <subdev/mc.h>
25#include <subdev/timer.h> 26#include <subdev/timer.h>
26#include <engine/fifo.h> 27#include <engine/fifo.h>
27 28
@@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
107 } 108 }
108 } 109 }
109 110
110 nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); 111 if (nvkm_mc_enabled(device, engine->subdev.index)) {
111 nvkm_wr32(device, base + 0x014, 0xffffffff); 112 nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
113 nvkm_wr32(device, base + 0x014, 0xffffffff);
114 }
112 return 0; 115 return 0;
113} 116}
114 117
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 3695cde669f8..07914e36939e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
132 duty = nvkm_therm_update_linear(therm); 132 duty = nvkm_therm_update_linear(therm);
133 break; 133 break;
134 case NVBIOS_THERM_FAN_OTHER: 134 case NVBIOS_THERM_FAN_OTHER:
135 if (therm->cstate) 135 if (therm->cstate) {
136 duty = therm->cstate; 136 duty = therm->cstate;
137 else 137 poll = false;
138 } else {
138 duty = nvkm_therm_update_linear_fallback(therm); 139 duty = nvkm_therm_update_linear_fallback(therm);
139 poll = false; 140 }
140 break; 141 break;
141 } 142 }
142 immd = false; 143 immd = false;
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 00a9c2ab9e6c..64fb788b6647 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
1406 1406
1407static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) 1407static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
1408{ 1408{
1409 struct dsi_data *dsi = p; 1409 struct dsi_data *dsi = s->private;
1410 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; 1410 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
1411 enum dss_clk_source dispc_clk_src, dsi_clk_src; 1411 enum dss_clk_source dispc_clk_src, dsi_clk_src;
1412 int dsi_module = dsi->module_id; 1412 int dsi_module = dsi->module_id;
@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
1467#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 1467#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1468static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) 1468static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
1469{ 1469{
1470 struct dsi_data *dsi = p; 1470 struct dsi_data *dsi = s->private;
1471 unsigned long flags; 1471 unsigned long flags;
1472 struct dsi_irq_stats stats; 1472 struct dsi_irq_stats stats;
1473 1473
@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
1558 1558
1559static int dsi_dump_dsi_regs(struct seq_file *s, void *p) 1559static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
1560{ 1560{
1561 struct dsi_data *dsi = p; 1561 struct dsi_data *dsi = s->private;
1562 1562
1563 if (dsi_runtime_get(dsi)) 1563 if (dsi_runtime_get(dsi))
1564 return 0; 1564 return 0;
@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
4751 dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; 4751 dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
4752 dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; 4752 dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
4753 dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; 4753 dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
4754 /*
4755 * HACK: These flags should be handled through the omap_dss_device bus
4756 * flags, but this will only be possible when the DSI encoder will be
4757 * converted to the omapdrm-managed encoder model.
4758 */
4759 dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
4760 dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
4761 dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
4762 dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
4763 dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
4764 dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
4754 4765
4755 dss_mgr_set_timings(&dsi->output, &dsi->vm); 4766 dss_mgr_set_timings(&dsi->output, &dsi->vm);
4756 4767
@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
5083 5094
5084 snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); 5095 snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
5085 dsi->debugfs.regs = dss_debugfs_create_file(dss, name, 5096 dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
5086 dsi_dump_dsi_regs, &dsi); 5097 dsi_dump_dsi_regs, dsi);
5087#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 5098#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
5088 snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); 5099 snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
5089 dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, 5100 dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
5090 dsi_dump_dsi_irqs, &dsi); 5101 dsi_dump_dsi_irqs, dsi);
5091#endif 5102#endif
5092 snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); 5103 snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
5093 dsi->debugfs.clks = dss_debugfs_create_file(dss, name, 5104 dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
5094 dsi_dump_dsi_clocks, &dsi); 5105 dsi_dump_dsi_clocks, dsi);
5095 5106
5096 return 0; 5107 return 0;
5097} 5108}
@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
5104 dss_debugfs_remove_file(dsi->debugfs.irqs); 5115 dss_debugfs_remove_file(dsi->debugfs.irqs);
5105 dss_debugfs_remove_file(dsi->debugfs.regs); 5116 dss_debugfs_remove_file(dsi->debugfs.regs);
5106 5117
5107 of_platform_depopulate(dev);
5108
5109 WARN_ON(dsi->scp_clk_refcount > 0); 5118 WARN_ON(dsi->scp_clk_refcount > 0);
5110 5119
5111 dss_pll_unregister(&dsi->pll); 5120 dss_pll_unregister(&dsi->pll);
@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
5457 5466
5458 dsi_uninit_output(dsi); 5467 dsi_uninit_output(dsi);
5459 5468
5469 of_platform_depopulate(&pdev->dev);
5470
5460 pm_runtime_disable(&pdev->dev); 5471 pm_runtime_disable(&pdev->dev);
5461 5472
5462 if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { 5473 if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 13c8a662f9b4..ccb090f3ab30 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = {
250#if defined(CONFIG_DEBUG_FS) 250#if defined(CONFIG_DEBUG_FS)
251 .debugfs_init = qxl_debugfs_init, 251 .debugfs_init = qxl_debugfs_init,
252#endif 252#endif
253 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
254 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
255 .gem_prime_export = drm_gem_prime_export, 253 .gem_prime_export = drm_gem_prime_export,
256 .gem_prime_import = drm_gem_prime_import, 254 .gem_prime_import = drm_gem_prime_import,
257 .gem_prime_pin = qxl_gem_prime_pin, 255 .gem_prime_pin = qxl_gem_prime_pin,
258 .gem_prime_unpin = qxl_gem_prime_unpin, 256 .gem_prime_unpin = qxl_gem_prime_unpin,
259 .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
260 .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
261 .gem_prime_vmap = qxl_gem_prime_vmap, 257 .gem_prime_vmap = qxl_gem_prime_vmap,
262 .gem_prime_vunmap = qxl_gem_prime_vunmap, 258 .gem_prime_vunmap = qxl_gem_prime_vunmap,
263 .gem_prime_mmap = qxl_gem_prime_mmap, 259 .gem_prime_mmap = qxl_gem_prime_mmap,
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index a55dece118b2..df65d3c1a7b8 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
38 WARN_ONCE(1, "not implemented"); 38 WARN_ONCE(1, "not implemented");
39} 39}
40 40
41struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
42{
43 WARN_ONCE(1, "not implemented");
44 return ERR_PTR(-ENOSYS);
45}
46
47struct drm_gem_object *qxl_gem_prime_import_sg_table(
48 struct drm_device *dev, struct dma_buf_attachment *attach,
49 struct sg_table *table)
50{
51 WARN_ONCE(1, "not implemented");
52 return ERR_PTR(-ENOSYS);
53}
54
55void *qxl_gem_prime_vmap(struct drm_gem_object *obj) 41void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
56{ 42{
57 WARN_ONCE(1, "not implemented"); 43 WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779a80b4..a97294ac96d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
5676 u16 data_offset, size; 5676 u16 data_offset, size;
5677 u8 frev, crev; 5677 u8 frev, crev;
5678 struct ci_power_info *pi; 5678 struct ci_power_info *pi;
5679 enum pci_bus_speed speed_cap; 5679 enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
5680 struct pci_dev *root = rdev->pdev->bus->self; 5680 struct pci_dev *root = rdev->pdev->bus->self;
5681 int ret; 5681 int ret;
5682 5682
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
5685 return -ENOMEM; 5685 return -ENOMEM;
5686 rdev->pm.dpm.priv = pi; 5686 rdev->pm.dpm.priv = pi;
5687 5687
5688 speed_cap = pcie_get_speed_cap(root); 5688 if (!pci_is_root_bus(rdev->pdev->bus))
5689 speed_cap = pcie_get_speed_cap(root);
5689 if (speed_cap == PCI_SPEED_UNKNOWN) { 5690 if (speed_cap == PCI_SPEED_UNKNOWN) {
5690 pi->sys_pcie_mask = 0; 5691 pi->sys_pcie_mask = 0;
5691 } else { 5692 } else {
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dec1e081f529..6a8fb6fd183c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
172 } 172 }
173 173
174 if (radeon_is_px(dev)) { 174 if (radeon_is_px(dev)) {
175 dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
175 pm_runtime_use_autosuspend(dev->dev); 176 pm_runtime_use_autosuspend(dev->dev);
176 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 177 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
177 pm_runtime_set_active(dev->dev); 178 pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3af015..0a785ef0ab66 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
6899 struct ni_power_info *ni_pi; 6899 struct ni_power_info *ni_pi;
6900 struct si_power_info *si_pi; 6900 struct si_power_info *si_pi;
6901 struct atom_clock_dividers dividers; 6901 struct atom_clock_dividers dividers;
6902 enum pci_bus_speed speed_cap; 6902 enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
6903 struct pci_dev *root = rdev->pdev->bus->self; 6903 struct pci_dev *root = rdev->pdev->bus->self;
6904 int ret; 6904 int ret;
6905 6905
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
6911 eg_pi = &ni_pi->eg; 6911 eg_pi = &ni_pi->eg;
6912 pi = &eg_pi->rv7xx; 6912 pi = &eg_pi->rv7xx;
6913 6913
6914 speed_cap = pcie_get_speed_cap(root); 6914 if (!pci_is_root_bus(rdev->pdev->bus))
6915 speed_cap = pcie_get_speed_cap(root);
6915 if (speed_cap == PCI_SPEED_UNKNOWN) { 6916 if (speed_cap == PCI_SPEED_UNKNOWN) {
6916 si_pi->sys_pcie_mask = 0; 6917 si_pi->sys_pcie_mask = 0;
6917 } else { 6918 } else {
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 96ac1458a59c..c0351abf83a3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -1,17 +1,8 @@
1//SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
4 * Author: 4 * Author:
5 * Sandy Huang <hjc@rock-chips.com> 5 * Sandy Huang <hjc@rock-chips.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */ 6 */
16 7
17#include <drm/drmP.h> 8#include <drm/drmP.h>
@@ -113,8 +104,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
113 child_count++; 104 child_count++;
114 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, 105 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
115 &panel, &bridge); 106 &panel, &bridge);
116 if (!ret) 107 if (!ret) {
108 of_node_put(endpoint);
117 break; 109 break;
110 }
118 } 111 }
119 112
120 of_node_put(port); 113 of_node_put(port);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 38b52e63b2b0..27b9635124bc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -1,17 +1,8 @@
1//SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
4 * Author: 4 * Author:
5 * Sandy Huang <hjc@rock-chips.com> 5 * Sandy Huang <hjc@rock-chips.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */ 6 */
16 7
17#ifdef CONFIG_ROCKCHIP_RGB 8#ifdef CONFIG_ROCKCHIP_RGB
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 4463d3826ecb..e2942c9a11a7 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
440 440
441 while ((entity->dependency = 441 while ((entity->dependency =
442 sched->ops->dependency(sched_job, entity))) { 442 sched->ops->dependency(sched_job, entity))) {
443 trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
443 444
444 if (drm_sched_entity_add_dependency_cb(entity)) { 445 if (drm_sched_entity_add_dependency_cb(entity))
445
446 trace_drm_sched_job_wait_dep(sched_job,
447 entity->dependency);
448 return NULL; 446 return NULL;
449 }
450 } 447 }
451 448
452 /* skip jobs from entity that marked guilty */ 449 /* skip jobs from entity that marked guilty */
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 9e9255ee59cd..a021bab11a4f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
786 remote = of_graph_get_remote_port_parent(ep); 786 remote = of_graph_get_remote_port_parent(ep);
787 if (!remote) 787 if (!remote)
788 continue; 788 continue;
789 of_node_put(remote);
789 790
790 /* does this node match any registered engines? */ 791 /* does this node match any registered engines? */
791 list_for_each_entry(frontend, &drv->frontend_list, list) { 792 list_for_each_entry(frontend, &drv->frontend_list, list) {
792 if (remote == frontend->node) { 793 if (remote == frontend->node) {
793 of_node_put(remote);
794 of_node_put(port); 794 of_node_put(port);
795 of_node_put(ep);
795 return frontend; 796 return frontend;
796 } 797 }
797 } 798 }
798 } 799 }
799 800 of_node_put(port);
800 return ERR_PTR(-EINVAL); 801 return ERR_PTR(-EINVAL);
801} 802}
802 803
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 061d2e0d9011..416da5376701 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
92 val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); 92 val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
93 val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; 93 val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
94 writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); 94 writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
95
96 clk_disable_unprepare(hdmi->tmds_clk);
95} 97}
96 98
97static void sun4i_hdmi_enable(struct drm_encoder *encoder) 99static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder)
102 104
103 DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); 105 DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
104 106
107 clk_prepare_enable(hdmi->tmds_clk);
108
105 sun4i_hdmi_setup_avi_infoframes(hdmi, mode); 109 sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
106 val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); 110 val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
107 val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); 111 val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 0420f5c978b9..cf45d0f940f9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
761 return PTR_ERR(tcon->sclk0); 761 return PTR_ERR(tcon->sclk0);
762 } 762 }
763 } 763 }
764 clk_prepare_enable(tcon->sclk0);
764 765
765 if (tcon->quirks->has_channel_1) { 766 if (tcon->quirks->has_channel_1) {
766 tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); 767 tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
775 776
776static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) 777static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
777{ 778{
779 clk_disable_unprepare(tcon->sclk0);
778 clk_disable_unprepare(tcon->clk); 780 clk_disable_unprepare(tcon->clk);
779} 781}
780 782
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index f7f32a885af7..2d1aaca49105 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -127,14 +127,10 @@ static struct drm_driver driver = {
127#if defined(CONFIG_DEBUG_FS) 127#if defined(CONFIG_DEBUG_FS)
128 .debugfs_init = virtio_gpu_debugfs_init, 128 .debugfs_init = virtio_gpu_debugfs_init,
129#endif 129#endif
130 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
131 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
132 .gem_prime_export = drm_gem_prime_export, 130 .gem_prime_export = drm_gem_prime_export,
133 .gem_prime_import = drm_gem_prime_import, 131 .gem_prime_import = drm_gem_prime_import,
134 .gem_prime_pin = virtgpu_gem_prime_pin, 132 .gem_prime_pin = virtgpu_gem_prime_pin,
135 .gem_prime_unpin = virtgpu_gem_prime_unpin, 133 .gem_prime_unpin = virtgpu_gem_prime_unpin,
136 .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
137 .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
138 .gem_prime_vmap = virtgpu_gem_prime_vmap, 134 .gem_prime_vmap = virtgpu_gem_prime_vmap,
139 .gem_prime_vunmap = virtgpu_gem_prime_vunmap, 135 .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
140 .gem_prime_mmap = virtgpu_gem_prime_mmap, 136 .gem_prime_mmap = virtgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 1deb41d42ea4..0c15000f926e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
372/* virtgpu_prime.c */ 372/* virtgpu_prime.c */
373int virtgpu_gem_prime_pin(struct drm_gem_object *obj); 373int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
374void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); 374void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
375struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
376struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
377 struct drm_device *dev, struct dma_buf_attachment *attach,
378 struct sg_table *sgt);
379void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); 375void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
380void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 376void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
381int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, 377int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 86ce0ae93f59..c59ec34c80a5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
39 WARN_ONCE(1, "not implemented"); 39 WARN_ONCE(1, "not implemented");
40} 40}
41 41
42struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
43{
44 WARN_ONCE(1, "not implemented");
45 return ERR_PTR(-ENODEV);
46}
47
48struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
49 struct drm_device *dev, struct dma_buf_attachment *attach,
50 struct sg_table *table)
51{
52 WARN_ONCE(1, "not implemented");
53 return ERR_PTR(-ENODEV);
54}
55
56void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) 42void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
57{ 43{
58 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); 44 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
index 9d9e8146db90..d7b409a3c0f8 100644
--- a/drivers/gpu/drm/vkms/vkms_crc.c
+++ b/drivers/gpu/drm/vkms/vkms_crc.c
@@ -1,4 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2
2#include "vkms_drv.h" 3#include "vkms_drv.h"
3#include <linux/crc32.h> 4#include <linux/crc32.h>
4#include <drm/drm_atomic.h> 5#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 177bbcb38306..eb56ee893761 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include "vkms_drv.h" 3#include "vkms_drv.h"
10#include <drm/drm_atomic_helper.h> 4#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 83087877565c..7dcbecb5fac2 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -1,9 +1,4 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 */
7 2
8/** 3/**
9 * DOC: vkms (Virtual Kernel Modesetting) 4 * DOC: vkms (Virtual Kernel Modesetting)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index e4469cd3d254..81f1cfbeb936 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -1,3 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2
1#ifndef _VKMS_DRV_H_ 3#ifndef _VKMS_DRV_H_
2#define _VKMS_DRV_H_ 4#define _VKMS_DRV_H_
3 5
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 80311daed47a..138b0bb325cf 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include <linux/shmem_fs.h> 3#include <linux/shmem_fs.h>
10 4
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 271a0eb9042c..4173e4f48334 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include "vkms_drv.h" 3#include "vkms_drv.h"
10#include <drm/drm_crtc_helper.h> 4#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 418817600ad1..0e67d2d42f0c 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include "vkms_drv.h" 3#include "vkms_drv.h"
10#include <drm/drm_plane_helper.h> 4#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25afb1d594e3..7ef5dcb06104 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -26,6 +26,7 @@
26 **************************************************************************/ 26 **************************************************************************/
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/console.h> 28#include <linux/console.h>
29#include <linux/dma-mapping.h>
29 30
30#include <drm/drmP.h> 31#include <drm/drmP.h>
31#include "vmwgfx_drv.h" 32#include "vmwgfx_drv.h"
@@ -34,7 +35,6 @@
34#include <drm/ttm/ttm_placement.h> 35#include <drm/ttm/ttm_placement.h>
35#include <drm/ttm/ttm_bo_driver.h> 36#include <drm/ttm/ttm_bo_driver.h>
36#include <drm/ttm/ttm_module.h> 37#include <drm/ttm/ttm_module.h>
37#include <linux/intel-iommu.h>
38 38
39#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 39#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
40#define VMWGFX_CHIP_SVGAII 0 40#define VMWGFX_CHIP_SVGAII 0
@@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
546} 546}
547 547
548/** 548/**
549 * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
550 * taking place.
551 * @dev: Pointer to the struct drm_device.
552 *
553 * Return: true if iommu present, false otherwise.
554 */
555static bool vmw_assume_iommu(struct drm_device *dev)
556{
557 const struct dma_map_ops *ops = get_dma_ops(dev->dev);
558
559 return !dma_is_direct(ops) && ops &&
560 ops->map_page != dma_direct_map_page;
561}
562
563/**
549 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 564 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
550 * system. 565 * system.
551 * 566 *
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
565 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 580 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
566 [vmw_dma_map_populate] = "Keeping DMA mappings.", 581 [vmw_dma_map_populate] = "Keeping DMA mappings.",
567 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 582 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
568#ifdef CONFIG_X86
569 const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
570 583
571#ifdef CONFIG_INTEL_IOMMU 584 if (vmw_force_coherent)
572 if (intel_iommu_enabled) { 585 dev_priv->map_mode = vmw_dma_alloc_coherent;
586 else if (vmw_assume_iommu(dev_priv->dev))
573 dev_priv->map_mode = vmw_dma_map_populate; 587 dev_priv->map_mode = vmw_dma_map_populate;
574 goto out_fixup; 588 else if (!vmw_force_iommu)
575 }
576#endif
577
578 if (!(vmw_force_iommu || vmw_force_coherent)) {
579 dev_priv->map_mode = vmw_dma_phys; 589 dev_priv->map_mode = vmw_dma_phys;
580 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 590 else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
581 return 0;
582 }
583
584 dev_priv->map_mode = vmw_dma_map_populate;
585
586 if (dma_ops && dma_ops->sync_single_for_cpu)
587 dev_priv->map_mode = vmw_dma_alloc_coherent; 591 dev_priv->map_mode = vmw_dma_alloc_coherent;
588#ifdef CONFIG_SWIOTLB 592 else
589 if (swiotlb_nr_tbl() == 0)
590 dev_priv->map_mode = vmw_dma_map_populate; 593 dev_priv->map_mode = vmw_dma_map_populate;
591#endif
592 594
593#ifdef CONFIG_INTEL_IOMMU 595 if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
594out_fixup:
595#endif
596 if (dev_priv->map_mode == vmw_dma_map_populate &&
597 vmw_restrict_iommu)
598 dev_priv->map_mode = vmw_dma_map_bind; 596 dev_priv->map_mode = vmw_dma_map_bind;
599 597
600 if (vmw_force_coherent) 598 /* No TTM coherent page pool? FIXME: Ask TTM instead! */
601 dev_priv->map_mode = vmw_dma_alloc_coherent; 599 if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
602 600 (dev_priv->map_mode == vmw_dma_alloc_coherent))
603#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
604 /*
605 * No coherent page pool
606 */
607 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
608 return -EINVAL; 601 return -EINVAL;
609#endif
610
611#else /* CONFIG_X86 */
612 dev_priv->map_mode = vmw_dma_map_populate;
613#endif /* CONFIG_X86 */
614 602
615 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 603 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
616
617 return 0; 604 return 0;
618} 605}
619 606
@@ -625,24 +612,20 @@ out_fixup:
625 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 612 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
626 * restriction also for 64-bit systems. 613 * restriction also for 64-bit systems.
627 */ 614 */
628#ifdef CONFIG_INTEL_IOMMU
629static int vmw_dma_masks(struct vmw_private *dev_priv) 615static int vmw_dma_masks(struct vmw_private *dev_priv)
630{ 616{
631 struct drm_device *dev = dev_priv->dev; 617 struct drm_device *dev = dev_priv->dev;
618 int ret = 0;
632 619
633 if (intel_iommu_enabled && 620 ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
621 if (dev_priv->map_mode != vmw_dma_phys &&
634 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 622 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
635 DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 623 DRM_INFO("Restricting DMA addresses to 44 bits.\n");
636 return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); 624 return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
637 } 625 }
638 return 0; 626
639} 627 return ret;
640#else
641static int vmw_dma_masks(struct vmw_private *dev_priv)
642{
643 return 0;
644} 628}
645#endif
646 629
647static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 630static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
648{ 631{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f2d13a72c05d..88b8178d4687 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3570 *p_fence = NULL; 3570 *p_fence = NULL;
3571 } 3571 }
3572 3572
3573 return 0; 3573 return ret;
3574} 3574}
3575 3575
3576/** 3576/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b351fb5214d3..ed2f67822f45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
1646 struct drm_connector_state *conn_state; 1646 struct drm_connector_state *conn_state;
1647 struct vmw_connector_state *vmw_conn_state; 1647 struct vmw_connector_state *vmw_conn_state;
1648 1648
1649 if (!du->pref_active) { 1649 if (!du->pref_active && new_crtc_state->enable) {
1650 ret = -EINVAL; 1650 ret = -EINVAL;
1651 goto clean; 1651 goto clean;
1652 } 1652 }
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2554 user_fence_rep) 2554 user_fence_rep)
2555{ 2555{
2556 struct vmw_fence_obj *fence = NULL; 2556 struct vmw_fence_obj *fence = NULL;
2557 uint32_t handle; 2557 uint32_t handle = 0;
2558 int ret; 2558 int ret = 0;
2559 2559
2560 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || 2560 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2561 out_fence) 2561 out_fence)
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 474b00e19697..0a7d4395d427 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = {
898 .cpmem_ofs = 0x1f000000, 898 .cpmem_ofs = 0x1f000000,
899 .srm_ofs = 0x1f040000, 899 .srm_ofs = 0x1f040000,
900 .tpm_ofs = 0x1f060000, 900 .tpm_ofs = 0x1f060000,
901 .csi0_ofs = 0x1f030000, 901 .csi0_ofs = 0x1e030000,
902 .csi1_ofs = 0x1f038000, 902 .csi1_ofs = 0x1e038000,
903 .ic_ofs = 0x1e020000, 903 .ic_ofs = 0x1e020000,
904 .disp0_ofs = 0x1e040000, 904 .disp0_ofs = 0x1e040000,
905 .disp1_ofs = 0x1e048000, 905 .disp1_ofs = 0x1e048000,
@@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = {
914 .cpmem_ofs = 0x07000000, 914 .cpmem_ofs = 0x07000000,
915 .srm_ofs = 0x07040000, 915 .srm_ofs = 0x07040000,
916 .tpm_ofs = 0x07060000, 916 .tpm_ofs = 0x07060000,
917 .csi0_ofs = 0x07030000, 917 .csi0_ofs = 0x06030000,
918 .csi1_ofs = 0x07038000, 918 .csi1_ofs = 0x06038000,
919 .ic_ofs = 0x06020000, 919 .ic_ofs = 0x06020000,
920 .disp0_ofs = 0x06040000, 920 .disp0_ofs = 0x06040000,
921 .disp1_ofs = 0x06048000, 921 .disp1_ofs = 0x06048000,
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 2f8db9d62551..4a28f3fbb0a2 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -106,6 +106,7 @@ struct ipu_pre {
106 void *buffer_virt; 106 void *buffer_virt;
107 bool in_use; 107 bool in_use;
108 unsigned int safe_window_end; 108 unsigned int safe_window_end;
109 unsigned int last_bufaddr;
109}; 110};
110 111
111static DEFINE_MUTEX(ipu_pre_list_mutex); 112static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
185 186
186 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); 187 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
187 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 188 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
189 pre->last_bufaddr = bufaddr;
188 190
189 val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | 191 val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
190 IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | 192 IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
@@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
242 unsigned short current_yblock; 244 unsigned short current_yblock;
243 u32 val; 245 u32 val;
244 246
247 if (bufaddr == pre->last_bufaddr)
248 return;
249
245 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 250 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
251 pre->last_bufaddr = bufaddr;
246 252
247 do { 253 do {
248 if (time_after(jiffies, timeout)) { 254 if (time_after(jiffies, timeout)) {
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index b677e5d524e6..d5f1d8e1c6f8 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO
21 bool "Laptop Hybrid Graphics - GPU switching support" 21 bool "Laptop Hybrid Graphics - GPU switching support"
22 depends on X86 22 depends on X86
23 depends on ACPI 23 depends on ACPI
24 depends on PCI
24 select VGA_ARB 25 select VGA_ARB
25 help 26 help
26 Many laptops released in 2008/9/10 have two GPUs with a multiplexer 27 Many laptops released in 2008/9/10 have two GPUs with a multiplexer
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f41d5fe51abe..9993b692598f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -125,6 +125,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
125{ 125{
126 struct hid_collection *collection; 126 struct hid_collection *collection;
127 unsigned usage; 127 unsigned usage;
128 int collection_index;
128 129
129 usage = parser->local.usage[0]; 130 usage = parser->local.usage[0];
130 131
@@ -167,13 +168,13 @@ static int open_collection(struct hid_parser *parser, unsigned type)
167 parser->collection_stack[parser->collection_stack_ptr++] = 168 parser->collection_stack[parser->collection_stack_ptr++] =
168 parser->device->maxcollection; 169 parser->device->maxcollection;
169 170
170 collection = parser->device->collection + 171 collection_index = parser->device->maxcollection++;
171 parser->device->maxcollection++; 172 collection = parser->device->collection + collection_index;
172 collection->type = type; 173 collection->type = type;
173 collection->usage = usage; 174 collection->usage = usage;
174 collection->level = parser->collection_stack_ptr - 1; 175 collection->level = parser->collection_stack_ptr - 1;
175 collection->parent = parser->active_collection; 176 collection->parent_idx = (collection->level == 0) ? -1 :
176 parser->active_collection = collection; 177 parser->collection_stack[collection->level - 1];
177 178
178 if (type == HID_COLLECTION_APPLICATION) 179 if (type == HID_COLLECTION_APPLICATION)
179 parser->device->maxapplication++; 180 parser->device->maxapplication++;
@@ -192,8 +193,6 @@ static int close_collection(struct hid_parser *parser)
192 return -EINVAL; 193 return -EINVAL;
193 } 194 }
194 parser->collection_stack_ptr--; 195 parser->collection_stack_ptr--;
195 if (parser->active_collection)
196 parser->active_collection = parser->active_collection->parent;
197 return 0; 196 return 0;
198} 197}
199 198
@@ -1006,10 +1005,12 @@ static void hid_apply_multiplier_to_field(struct hid_device *hid,
1006 usage = &field->usage[i]; 1005 usage = &field->usage[i];
1007 1006
1008 collection = &hid->collection[usage->collection_index]; 1007 collection = &hid->collection[usage->collection_index];
1009 while (collection && collection != multiplier_collection) 1008 while (collection->parent_idx != -1 &&
1010 collection = collection->parent; 1009 collection != multiplier_collection)
1010 collection = &hid->collection[collection->parent_idx];
1011 1011
1012 if (collection || multiplier_collection == NULL) 1012 if (collection->parent_idx != -1 ||
1013 multiplier_collection == NULL)
1013 usage->resolution_multiplier = effective_multiplier; 1014 usage->resolution_multiplier = effective_multiplier;
1014 1015
1015 } 1016 }
@@ -1044,9 +1045,9 @@ static void hid_apply_multiplier(struct hid_device *hid,
1044 * applicable fields later. 1045 * applicable fields later.
1045 */ 1046 */
1046 multiplier_collection = &hid->collection[multiplier->usage->collection_index]; 1047 multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1047 while (multiplier_collection && 1048 while (multiplier_collection->parent_idx != -1 &&
1048 multiplier_collection->type != HID_COLLECTION_LOGICAL) 1049 multiplier_collection->type != HID_COLLECTION_LOGICAL)
1049 multiplier_collection = multiplier_collection->parent; 1050 multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1050 1051
1051 effective_multiplier = hid_calculate_multiplier(hid, multiplier); 1052 effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1052 1053
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index c530476edba6..ac9fda1b5a72 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/debugfs.h> 31#include <linux/debugfs.h>
32#include <linux/seq_file.h> 32#include <linux/seq_file.h>
33#include <linux/kfifo.h>
33#include <linux/sched/signal.h> 34#include <linux/sched/signal.h>
34#include <linux/export.h> 35#include <linux/export.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
661/* enqueue string to 'events' ring buffer */ 662/* enqueue string to 'events' ring buffer */
662void hid_debug_event(struct hid_device *hdev, char *buf) 663void hid_debug_event(struct hid_device *hdev, char *buf)
663{ 664{
664 unsigned i;
665 struct hid_debug_list *list; 665 struct hid_debug_list *list;
666 unsigned long flags; 666 unsigned long flags;
667 667
668 spin_lock_irqsave(&hdev->debug_list_lock, flags); 668 spin_lock_irqsave(&hdev->debug_list_lock, flags);
669 list_for_each_entry(list, &hdev->debug_list, node) { 669 list_for_each_entry(list, &hdev->debug_list, node)
670 for (i = 0; buf[i]; i++) 670 kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
671 list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
672 buf[i];
673 list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
674 }
675 spin_unlock_irqrestore(&hdev->debug_list_lock, flags); 671 spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
676 672
677 wake_up_interruptible(&hdev->debug_wait); 673 wake_up_interruptible(&hdev->debug_wait);
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
722 hid_debug_event(hdev, buf); 718 hid_debug_event(hdev, buf);
723 719
724 kfree(buf); 720 kfree(buf);
725 wake_up_interruptible(&hdev->debug_wait); 721 wake_up_interruptible(&hdev->debug_wait);
726
727} 722}
728EXPORT_SYMBOL_GPL(hid_dump_input); 723EXPORT_SYMBOL_GPL(hid_dump_input);
729 724
@@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
1083 goto out; 1078 goto out;
1084 } 1079 }
1085 1080
1086 if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { 1081 err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
1087 err = -ENOMEM; 1082 if (err) {
1088 kfree(list); 1083 kfree(list);
1089 goto out; 1084 goto out;
1090 } 1085 }
@@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
1104 size_t count, loff_t *ppos) 1099 size_t count, loff_t *ppos)
1105{ 1100{
1106 struct hid_debug_list *list = file->private_data; 1101 struct hid_debug_list *list = file->private_data;
1107 int ret = 0, len; 1102 int ret = 0, copied;
1108 DECLARE_WAITQUEUE(wait, current); 1103 DECLARE_WAITQUEUE(wait, current);
1109 1104
1110 mutex_lock(&list->read_mutex); 1105 mutex_lock(&list->read_mutex);
1111 while (ret == 0) { 1106 if (kfifo_is_empty(&list->hid_debug_fifo)) {
1112 if (list->head == list->tail) { 1107 add_wait_queue(&list->hdev->debug_wait, &wait);
1113 add_wait_queue(&list->hdev->debug_wait, &wait); 1108 set_current_state(TASK_INTERRUPTIBLE);
1114 set_current_state(TASK_INTERRUPTIBLE); 1109
1115 1110 while (kfifo_is_empty(&list->hid_debug_fifo)) {
1116 while (list->head == list->tail) { 1111 if (file->f_flags & O_NONBLOCK) {
1117 if (file->f_flags & O_NONBLOCK) { 1112 ret = -EAGAIN;
1118 ret = -EAGAIN; 1113 break;
1119 break; 1114 }
1120 }
1121 if (signal_pending(current)) {
1122 ret = -ERESTARTSYS;
1123 break;
1124 }
1125 1115
1126 if (!list->hdev || !list->hdev->debug) { 1116 if (signal_pending(current)) {
1127 ret = -EIO; 1117 ret = -ERESTARTSYS;
1128 set_current_state(TASK_RUNNING); 1118 break;
1129 goto out; 1119 }
1130 }
1131 1120
1132 /* allow O_NONBLOCK from other threads */ 1121 /* if list->hdev is NULL we cannot remove_wait_queue().
1133 mutex_unlock(&list->read_mutex); 1122 * if list->hdev->debug is 0 then hid_debug_unregister()
1134 schedule(); 1123 * was already called and list->hdev is being destroyed.
1135 mutex_lock(&list->read_mutex); 1124 * if we add remove_wait_queue() here we can hit a race.
1136 set_current_state(TASK_INTERRUPTIBLE); 1125 */
1126 if (!list->hdev || !list->hdev->debug) {
1127 ret = -EIO;
1128 set_current_state(TASK_RUNNING);
1129 goto out;
1137 } 1130 }
1138 1131
1139 set_current_state(TASK_RUNNING); 1132 /* allow O_NONBLOCK from other threads */
1140 remove_wait_queue(&list->hdev->debug_wait, &wait); 1133 mutex_unlock(&list->read_mutex);
1134 schedule();
1135 mutex_lock(&list->read_mutex);
1136 set_current_state(TASK_INTERRUPTIBLE);
1141 } 1137 }
1142 1138
1143 if (ret) 1139 __set_current_state(TASK_RUNNING);
1144 goto out; 1140 remove_wait_queue(&list->hdev->debug_wait, &wait);
1145 1141
1146 /* pass the ringbuffer contents to userspace */ 1142 if (ret)
1147copy_rest:
1148 if (list->tail == list->head)
1149 goto out; 1143 goto out;
1150 if (list->tail > list->head) {
1151 len = list->tail - list->head;
1152 if (len > count)
1153 len = count;
1154
1155 if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
1156 ret = -EFAULT;
1157 goto out;
1158 }
1159 ret += len;
1160 list->head += len;
1161 } else {
1162 len = HID_DEBUG_BUFSIZE - list->head;
1163 if (len > count)
1164 len = count;
1165
1166 if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
1167 ret = -EFAULT;
1168 goto out;
1169 }
1170 list->head = 0;
1171 ret += len;
1172 count -= len;
1173 if (count > 0)
1174 goto copy_rest;
1175 }
1176
1177 } 1144 }
1145
1146 /* pass the fifo content to userspace, locking is not needed with only
1147 * one concurrent reader and one concurrent writer
1148 */
1149 ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
1150 if (ret)
1151 goto out;
1152 ret = copied;
1178out: 1153out:
1179 mutex_unlock(&list->read_mutex); 1154 mutex_unlock(&list->read_mutex);
1180 return ret; 1155 return ret;
@@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
1185 struct hid_debug_list *list = file->private_data; 1160 struct hid_debug_list *list = file->private_data;
1186 1161
1187 poll_wait(file, &list->hdev->debug_wait, wait); 1162 poll_wait(file, &list->hdev->debug_wait, wait);
1188 if (list->head != list->tail) 1163 if (!kfifo_is_empty(&list->hid_debug_fifo))
1189 return EPOLLIN | EPOLLRDNORM; 1164 return EPOLLIN | EPOLLRDNORM;
1190 if (!list->hdev->debug) 1165 if (!list->hdev->debug)
1191 return EPOLLERR | EPOLLHUP; 1166 return EPOLLERR | EPOLLHUP;
@@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
1200 spin_lock_irqsave(&list->hdev->debug_list_lock, flags); 1175 spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
1201 list_del(&list->node); 1176 list_del(&list->node);
1202 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); 1177 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
1203 kfree(list->hid_debug_buf); 1178 kfifo_free(&list->hid_debug_fifo);
1204 kfree(list); 1179 kfree(list);
1205 1180
1206 return 0; 1181 return 0;
@@ -1246,4 +1221,3 @@ void hid_debug_exit(void)
1246{ 1221{
1247 debugfs_remove_recursive(hid_debug_root); 1222 debugfs_remove_recursive(hid_debug_root);
1248} 1223}
1249
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 518fa76414f5..24f846d67478 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -461,6 +461,9 @@
461#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a 461#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
462#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 462#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
463 463
464#define I2C_VENDOR_ID_GOODIX 0x27c6
465#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
466
464#define USB_VENDOR_ID_GOODTOUCH 0x1aad 467#define USB_VENDOR_ID_GOODTOUCH 0x1aad
465#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f 468#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f
466 469
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 8555ce7e737b..c5edfa966343 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -179,6 +179,8 @@ static const struct i2c_hid_quirks {
179 I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, 179 I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
180 { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001, 180 { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
181 I2C_HID_QUIRK_NO_RUNTIME_PM }, 181 I2C_HID_QUIRK_NO_RUNTIME_PM },
182 { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
183 I2C_HID_QUIRK_NO_RUNTIME_PM },
182 { 0, 0 } 184 { 0, 0 }
183}; 185};
184 186
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index ce0ba2062723..bea4c9850247 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -701,19 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
701int vmbus_disconnect_ring(struct vmbus_channel *channel) 701int vmbus_disconnect_ring(struct vmbus_channel *channel)
702{ 702{
703 struct vmbus_channel *cur_channel, *tmp; 703 struct vmbus_channel *cur_channel, *tmp;
704 unsigned long flags;
705 LIST_HEAD(list);
706 int ret; 704 int ret;
707 705
708 if (channel->primary_channel != NULL) 706 if (channel->primary_channel != NULL)
709 return -EINVAL; 707 return -EINVAL;
710 708
711 /* Snapshot the list of subchannels */ 709 list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
712 spin_lock_irqsave(&channel->lock, flags);
713 list_splice_init(&channel->sc_list, &list);
714 spin_unlock_irqrestore(&channel->lock, flags);
715
716 list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
717 if (cur_channel->rescind) 710 if (cur_channel->rescind)
718 wait_for_completion(&cur_channel->rescind_event); 711 wait_for_completion(&cur_channel->rescind_event);
719 712
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 5301fef16c31..7c6349a50ef1 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
888 pfn_cnt -= pgs_ol; 888 pfn_cnt -= pgs_ol;
889 /* 889 /*
890 * Check if the corresponding memory block is already 890 * Check if the corresponding memory block is already
891 * online by checking its last previously backed page. 891 * online. It is possible to observe struct pages still
892 * In case it is we need to bring rest (which was not 892 * being uninitialized here so check section instead.
893 * backed previously) online too. 893 * In case the section is online we need to bring the
894 * rest of pfns (which were not backed previously)
895 * online too.
894 */ 896 */
895 if (start_pfn > has->start_pfn && 897 if (start_pfn > has->start_pfn &&
896 !PageReserved(pfn_to_page(start_pfn - 1))) 898 online_section_nr(pfn_to_section_nr(start_pfn)))
897 hv_bring_pgs_online(has, start_pfn, pgs_ol); 899 hv_bring_pgs_online(has, start_pfn, pgs_ol);
898 900
899 } 901 }
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 64d0c85d5161..1f1a55e07733 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
164} 164}
165 165
166/* Get various debug metrics for the specified ring buffer. */ 166/* Get various debug metrics for the specified ring buffer. */
167void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, 167int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
168 struct hv_ring_buffer_debug_info *debug_info) 168 struct hv_ring_buffer_debug_info *debug_info)
169{ 169{
170 u32 bytes_avail_towrite; 170 u32 bytes_avail_towrite;
171 u32 bytes_avail_toread; 171 u32 bytes_avail_toread;
172 172
173 if (ring_info->ring_buffer) { 173 if (!ring_info->ring_buffer)
174 hv_get_ringbuffer_availbytes(ring_info, 174 return -EINVAL;
175 &bytes_avail_toread, 175
176 &bytes_avail_towrite); 176 hv_get_ringbuffer_availbytes(ring_info,
177 177 &bytes_avail_toread,
178 debug_info->bytes_avail_toread = bytes_avail_toread; 178 &bytes_avail_towrite);
179 debug_info->bytes_avail_towrite = bytes_avail_towrite; 179 debug_info->bytes_avail_toread = bytes_avail_toread;
180 debug_info->current_read_index = 180 debug_info->bytes_avail_towrite = bytes_avail_towrite;
181 ring_info->ring_buffer->read_index; 181 debug_info->current_read_index = ring_info->ring_buffer->read_index;
182 debug_info->current_write_index = 182 debug_info->current_write_index = ring_info->ring_buffer->write_index;
183 ring_info->ring_buffer->write_index; 183 debug_info->current_interrupt_mask
184 debug_info->current_interrupt_mask = 184 = ring_info->ring_buffer->interrupt_mask;
185 ring_info->ring_buffer->interrupt_mask; 185 return 0;
186 }
187} 186}
188EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo); 187EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
189 188
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index d0ff65675292..403fee01572c 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
313{ 313{
314 struct hv_device *hv_dev = device_to_hv_device(dev); 314 struct hv_device *hv_dev = device_to_hv_device(dev);
315 struct hv_ring_buffer_debug_info outbound; 315 struct hv_ring_buffer_debug_info outbound;
316 int ret;
316 317
317 if (!hv_dev->channel) 318 if (!hv_dev->channel)
318 return -ENODEV; 319 return -ENODEV;
319 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 320
320 return -EINVAL; 321 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
321 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 322 &outbound);
323 if (ret < 0)
324 return ret;
325
322 return sprintf(buf, "%d\n", outbound.current_interrupt_mask); 326 return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
323} 327}
324static DEVICE_ATTR_RO(out_intr_mask); 328static DEVICE_ATTR_RO(out_intr_mask);
@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
328{ 332{
329 struct hv_device *hv_dev = device_to_hv_device(dev); 333 struct hv_device *hv_dev = device_to_hv_device(dev);
330 struct hv_ring_buffer_debug_info outbound; 334 struct hv_ring_buffer_debug_info outbound;
335 int ret;
331 336
332 if (!hv_dev->channel) 337 if (!hv_dev->channel)
333 return -ENODEV; 338 return -ENODEV;
334 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 339
335 return -EINVAL; 340 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
336 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 341 &outbound);
342 if (ret < 0)
343 return ret;
337 return sprintf(buf, "%d\n", outbound.current_read_index); 344 return sprintf(buf, "%d\n", outbound.current_read_index);
338} 345}
339static DEVICE_ATTR_RO(out_read_index); 346static DEVICE_ATTR_RO(out_read_index);
@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
344{ 351{
345 struct hv_device *hv_dev = device_to_hv_device(dev); 352 struct hv_device *hv_dev = device_to_hv_device(dev);
346 struct hv_ring_buffer_debug_info outbound; 353 struct hv_ring_buffer_debug_info outbound;
354 int ret;
347 355
348 if (!hv_dev->channel) 356 if (!hv_dev->channel)
349 return -ENODEV; 357 return -ENODEV;
350 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 358
351 return -EINVAL; 359 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
352 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 360 &outbound);
361 if (ret < 0)
362 return ret;
353 return sprintf(buf, "%d\n", outbound.current_write_index); 363 return sprintf(buf, "%d\n", outbound.current_write_index);
354} 364}
355static DEVICE_ATTR_RO(out_write_index); 365static DEVICE_ATTR_RO(out_write_index);
@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
360{ 370{
361 struct hv_device *hv_dev = device_to_hv_device(dev); 371 struct hv_device *hv_dev = device_to_hv_device(dev);
362 struct hv_ring_buffer_debug_info outbound; 372 struct hv_ring_buffer_debug_info outbound;
373 int ret;
363 374
364 if (!hv_dev->channel) 375 if (!hv_dev->channel)
365 return -ENODEV; 376 return -ENODEV;
366 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 377
367 return -EINVAL; 378 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
368 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 379 &outbound);
380 if (ret < 0)
381 return ret;
369 return sprintf(buf, "%d\n", outbound.bytes_avail_toread); 382 return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
370} 383}
371static DEVICE_ATTR_RO(out_read_bytes_avail); 384static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
376{ 389{
377 struct hv_device *hv_dev = device_to_hv_device(dev); 390 struct hv_device *hv_dev = device_to_hv_device(dev);
378 struct hv_ring_buffer_debug_info outbound; 391 struct hv_ring_buffer_debug_info outbound;
392 int ret;
379 393
380 if (!hv_dev->channel) 394 if (!hv_dev->channel)
381 return -ENODEV; 395 return -ENODEV;
382 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 396
383 return -EINVAL; 397 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
384 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 398 &outbound);
399 if (ret < 0)
400 return ret;
385 return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); 401 return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
386} 402}
387static DEVICE_ATTR_RO(out_write_bytes_avail); 403static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
391{ 407{
392 struct hv_device *hv_dev = device_to_hv_device(dev); 408 struct hv_device *hv_dev = device_to_hv_device(dev);
393 struct hv_ring_buffer_debug_info inbound; 409 struct hv_ring_buffer_debug_info inbound;
410 int ret;
394 411
395 if (!hv_dev->channel) 412 if (!hv_dev->channel)
396 return -ENODEV; 413 return -ENODEV;
397 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 414
398 return -EINVAL; 415 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
399 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 416 if (ret < 0)
417 return ret;
418
400 return sprintf(buf, "%d\n", inbound.current_interrupt_mask); 419 return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
401} 420}
402static DEVICE_ATTR_RO(in_intr_mask); 421static DEVICE_ATTR_RO(in_intr_mask);
@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
406{ 425{
407 struct hv_device *hv_dev = device_to_hv_device(dev); 426 struct hv_device *hv_dev = device_to_hv_device(dev);
408 struct hv_ring_buffer_debug_info inbound; 427 struct hv_ring_buffer_debug_info inbound;
428 int ret;
409 429
410 if (!hv_dev->channel) 430 if (!hv_dev->channel)
411 return -ENODEV; 431 return -ENODEV;
412 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 432
413 return -EINVAL; 433 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
414 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 434 if (ret < 0)
435 return ret;
436
415 return sprintf(buf, "%d\n", inbound.current_read_index); 437 return sprintf(buf, "%d\n", inbound.current_read_index);
416} 438}
417static DEVICE_ATTR_RO(in_read_index); 439static DEVICE_ATTR_RO(in_read_index);
@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
421{ 443{
422 struct hv_device *hv_dev = device_to_hv_device(dev); 444 struct hv_device *hv_dev = device_to_hv_device(dev);
423 struct hv_ring_buffer_debug_info inbound; 445 struct hv_ring_buffer_debug_info inbound;
446 int ret;
424 447
425 if (!hv_dev->channel) 448 if (!hv_dev->channel)
426 return -ENODEV; 449 return -ENODEV;
427 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 450
428 return -EINVAL; 451 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
429 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 452 if (ret < 0)
453 return ret;
454
430 return sprintf(buf, "%d\n", inbound.current_write_index); 455 return sprintf(buf, "%d\n", inbound.current_write_index);
431} 456}
432static DEVICE_ATTR_RO(in_write_index); 457static DEVICE_ATTR_RO(in_write_index);
@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
437{ 462{
438 struct hv_device *hv_dev = device_to_hv_device(dev); 463 struct hv_device *hv_dev = device_to_hv_device(dev);
439 struct hv_ring_buffer_debug_info inbound; 464 struct hv_ring_buffer_debug_info inbound;
465 int ret;
440 466
441 if (!hv_dev->channel) 467 if (!hv_dev->channel)
442 return -ENODEV; 468 return -ENODEV;
443 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 469
444 return -EINVAL; 470 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
445 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 471 if (ret < 0)
472 return ret;
473
446 return sprintf(buf, "%d\n", inbound.bytes_avail_toread); 474 return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
447} 475}
448static DEVICE_ATTR_RO(in_read_bytes_avail); 476static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
453{ 481{
454 struct hv_device *hv_dev = device_to_hv_device(dev); 482 struct hv_device *hv_dev = device_to_hv_device(dev);
455 struct hv_ring_buffer_debug_info inbound; 483 struct hv_ring_buffer_debug_info inbound;
484 int ret;
456 485
457 if (!hv_dev->channel) 486 if (!hv_dev->channel)
458 return -ENODEV; 487 return -ENODEV;
459 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 488
460 return -EINVAL; 489 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
461 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 490 if (ret < 0)
491 return ret;
492
462 return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); 493 return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
463} 494}
464static DEVICE_ATTR_RO(in_write_bytes_avail); 495static DEVICE_ATTR_RO(in_write_bytes_avail);
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 0e30fa00204c..f9b8e3e23a8e 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
393 } 393 }
394 394
395 rv = lm80_read_value(client, LM80_REG_FANDIV); 395 rv = lm80_read_value(client, LM80_REG_FANDIV);
396 if (rv < 0) 396 if (rv < 0) {
397 mutex_unlock(&data->update_lock);
397 return rv; 398 return rv;
399 }
398 reg = (rv & ~(3 << (2 * (nr + 1)))) 400 reg = (rv & ~(3 << (2 * (nr + 1))))
399 | (data->fan_div[nr] << (2 * (nr + 1))); 401 | (data->fan_div[nr] << (2 * (nr + 1)));
400 lm80_write_value(client, LM80_REG_FANDIV, reg); 402 lm80_write_value(client, LM80_REG_FANDIV, reg);
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index c3040079b1cb..59ee01f3d022 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -44,8 +44,8 @@
44 * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 44 * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
45 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 45 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
46 * (0xd451) 46 * (0xd451)
47 * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3 47 * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
48 * (0xd459) 48 * (0xd429)
49 * 49 *
50 * #temp lists the number of monitored temperature sources (first value) plus 50 * #temp lists the number of monitored temperature sources (first value) plus
51 * the number of directly connectable temperature sensors (second value). 51 * the number of directly connectable temperature sensors (second value).
@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
138#define SIO_NCT6795_ID 0xd350 138#define SIO_NCT6795_ID 0xd350
139#define SIO_NCT6796_ID 0xd420 139#define SIO_NCT6796_ID 0xd420
140#define SIO_NCT6797_ID 0xd450 140#define SIO_NCT6797_ID 0xd450
141#define SIO_NCT6798_ID 0xd458 141#define SIO_NCT6798_ID 0xd428
142#define SIO_ID_MASK 0xFFF8 142#define SIO_ID_MASK 0xFFF8
143 143
144enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; 144enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -3594,7 +3594,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
3594 fan5pin |= cr1b & BIT(5); 3594 fan5pin |= cr1b & BIT(5);
3595 fan5pin |= creb & BIT(5); 3595 fan5pin |= creb & BIT(5);
3596 3596
3597 fan6pin = creb & BIT(3); 3597 fan6pin = !dsw_en && (cr2d & BIT(1));
3598 fan6pin |= creb & BIT(3);
3598 3599
3599 pwm5pin |= cr2d & BIT(7); 3600 pwm5pin |= cr2d & BIT(7);
3600 pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); 3601 pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0));
@@ -4508,7 +4509,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
4508 4509
4509 if (data->kind == nct6791 || data->kind == nct6792 || 4510 if (data->kind == nct6791 || data->kind == nct6792 ||
4510 data->kind == nct6793 || data->kind == nct6795 || 4511 data->kind == nct6793 || data->kind == nct6795 ||
4511 data->kind == nct6796) 4512 data->kind == nct6796 || data->kind == nct6797 ||
4513 data->kind == nct6798)
4512 nct6791_enable_io_mapping(sioreg); 4514 nct6791_enable_io_mapping(sioreg);
4513 4515
4514 superio_exit(sioreg); 4516 superio_exit(sioreg);
@@ -4644,7 +4646,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
4644 4646
4645 if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || 4647 if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
4646 sio_data->kind == nct6793 || sio_data->kind == nct6795 || 4648 sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
4647 sio_data->kind == nct6796) 4649 sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
4650 sio_data->kind == nct6798)
4648 nct6791_enable_io_mapping(sioaddr); 4651 nct6791_enable_io_mapping(sioaddr);
4649 4652
4650 superio_exit(sioaddr); 4653 superio_exit(sioaddr);
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 423903f87955..391118c8aae8 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -380,8 +380,8 @@ static ssize_t occ_show_power_1(struct device *dev,
380 val *= 1000000ULL; 380 val *= 1000000ULL;
381 break; 381 break;
382 case 2: 382 case 2:
383 val = get_unaligned_be32(&power->update_tag) * 383 val = (u64)get_unaligned_be32(&power->update_tag) *
384 occ->powr_sample_time_us; 384 occ->powr_sample_time_us;
385 break; 385 break;
386 case 3: 386 case 3:
387 val = get_unaligned_be16(&power->value) * 1000000ULL; 387 val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -425,8 +425,8 @@ static ssize_t occ_show_power_2(struct device *dev,
425 &power->update_tag); 425 &power->update_tag);
426 break; 426 break;
427 case 2: 427 case 2:
428 val = get_unaligned_be32(&power->update_tag) * 428 val = (u64)get_unaligned_be32(&power->update_tag) *
429 occ->powr_sample_time_us; 429 occ->powr_sample_time_us;
430 break; 430 break;
431 case 3: 431 case 3:
432 val = get_unaligned_be16(&power->value) * 1000000ULL; 432 val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -463,8 +463,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
463 &power->system.update_tag); 463 &power->system.update_tag);
464 break; 464 break;
465 case 2: 465 case 2:
466 val = get_unaligned_be32(&power->system.update_tag) * 466 val = (u64)get_unaligned_be32(&power->system.update_tag) *
467 occ->powr_sample_time_us; 467 occ->powr_sample_time_us;
468 break; 468 break;
469 case 3: 469 case 3:
470 val = get_unaligned_be16(&power->system.value) * 1000000ULL; 470 val = get_unaligned_be16(&power->system.value) * 1000000ULL;
@@ -477,8 +477,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
477 &power->proc.update_tag); 477 &power->proc.update_tag);
478 break; 478 break;
479 case 6: 479 case 6:
480 val = get_unaligned_be32(&power->proc.update_tag) * 480 val = (u64)get_unaligned_be32(&power->proc.update_tag) *
481 occ->powr_sample_time_us; 481 occ->powr_sample_time_us;
482 break; 482 break;
483 case 7: 483 case 7:
484 val = get_unaligned_be16(&power->proc.value) * 1000000ULL; 484 val = get_unaligned_be16(&power->proc.value) * 1000000ULL;
@@ -491,8 +491,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
491 &power->vdd.update_tag); 491 &power->vdd.update_tag);
492 break; 492 break;
493 case 10: 493 case 10:
494 val = get_unaligned_be32(&power->vdd.update_tag) * 494 val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
495 occ->powr_sample_time_us; 495 occ->powr_sample_time_us;
496 break; 496 break;
497 case 11: 497 case 11:
498 val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; 498 val = get_unaligned_be16(&power->vdd.value) * 1000000ULL;
@@ -505,8 +505,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
505 &power->vdn.update_tag); 505 &power->vdn.update_tag);
506 break; 506 break;
507 case 14: 507 case 14:
508 val = get_unaligned_be32(&power->vdn.update_tag) * 508 val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
509 occ->powr_sample_time_us; 509 occ->powr_sample_time_us;
510 break; 510 break;
511 case 15: 511 case 15:
512 val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; 512 val = get_unaligned_be16(&power->vdn.value) * 1000000ULL;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 8844c9565d2a..7053be59ad2e 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
88 .data = (void *)2 88 .data = (void *)2
89 }, 89 },
90 { 90 {
91 .compatible = "ti,tmp422", 91 .compatible = "ti,tmp442",
92 .data = (void *)3 92 .data = (void *)3
93 }, 93 },
94 { }, 94 { },
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index ec6e69aa3a8e..d2fbb4bb4a43 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
183 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); 183 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
184} 184}
185 185
186static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev)
187{
188 i2c_dev->curr_msg = NULL;
189 i2c_dev->num_msgs = 0;
190
191 i2c_dev->msg_buf = NULL;
192 i2c_dev->msg_buf_remaining = 0;
193}
194
186/* 195/*
187 * Note about I2C_C_CLEAR on error: 196 * Note about I2C_C_CLEAR on error:
188 * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in 197 * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
@@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
283 292
284 time_left = wait_for_completion_timeout(&i2c_dev->completion, 293 time_left = wait_for_completion_timeout(&i2c_dev->completion,
285 adap->timeout); 294 adap->timeout);
295
296 bcm2835_i2c_finish_transfer(i2c_dev);
297
286 if (!time_left) { 298 if (!time_left) {
287 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 299 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
288 BCM2835_I2C_C_CLEAR); 300 BCM2835_I2C_C_CLEAR);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index b13605718291..d917cefc5a19 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
382 * Check for the message size against FIFO depth and set the 382 * Check for the message size against FIFO depth and set the
383 * 'hold bus' bit if it is greater than FIFO depth. 383 * 'hold bus' bit if it is greater than FIFO depth.
384 */ 384 */
385 if (id->recv_count > CDNS_I2C_FIFO_DEPTH) 385 if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
386 ctrl_reg |= CDNS_I2C_CR_HOLD; 386 ctrl_reg |= CDNS_I2C_CR_HOLD;
387 else
388 ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
387 389
388 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 390 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
389 391
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
440 * Check for the message size against FIFO depth and set the 442 * Check for the message size against FIFO depth and set the
441 * 'hold bus' bit if it is greater than FIFO depth. 443 * 'hold bus' bit if it is greater than FIFO depth.
442 */ 444 */
443 if (id->send_count > CDNS_I2C_FIFO_DEPTH) 445 if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
444 ctrl_reg |= CDNS_I2C_CR_HOLD; 446 ctrl_reg |= CDNS_I2C_CR_HOLD;
447 else
448 ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
449
445 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 450 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
446 451
447 /* Clear the interrupts in interrupt status register. */ 452 /* Clear the interrupts in interrupt status register. */
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b1086bfb0465..cd9c65f3d404 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
1500 return 0; 1500 return 0;
1501} 1501}
1502 1502
1503#ifdef CONFIG_PM 1503static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
1504static int omap_i2c_runtime_suspend(struct device *dev)
1505{ 1504{
1506 struct omap_i2c_dev *omap = dev_get_drvdata(dev); 1505 struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1507 1506
@@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
1527 return 0; 1526 return 0;
1528} 1527}
1529 1528
1530static int omap_i2c_runtime_resume(struct device *dev) 1529static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
1531{ 1530{
1532 struct omap_i2c_dev *omap = dev_get_drvdata(dev); 1531 struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1533 1532
@@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
1542} 1541}
1543 1542
1544static const struct dev_pm_ops omap_i2c_pm_ops = { 1543static const struct dev_pm_ops omap_i2c_pm_ops = {
1544 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1545 pm_runtime_force_resume)
1545 SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, 1546 SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
1546 omap_i2c_runtime_resume, NULL) 1547 omap_i2c_runtime_resume, NULL)
1547}; 1548};
1548#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
1549#else
1550#define OMAP_I2C_PM_OPS NULL
1551#endif /* CONFIG_PM */
1552 1549
1553static struct platform_driver omap_i2c_driver = { 1550static struct platform_driver omap_i2c_driver = {
1554 .probe = omap_i2c_probe, 1551 .probe = omap_i2c_probe,
1555 .remove = omap_i2c_remove, 1552 .remove = omap_i2c_remove,
1556 .driver = { 1553 .driver = {
1557 .name = "omap_i2c", 1554 .name = "omap_i2c",
1558 .pm = OMAP_I2C_PM_OPS, 1555 .pm = &omap_i2c_pm_ops,
1559 .of_match_table = of_match_ptr(omap_i2c_of_match), 1556 .of_match_table = of_match_ptr(omap_i2c_of_match),
1560 }, 1557 },
1561}; 1558};
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e417ebf7628c..c77adbbea0c7 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -155,6 +155,8 @@ enum msg_end_type {
155 * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that 155 * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
156 * provides additional features and allows for longer messages to 156 * provides additional features and allows for longer messages to
157 * be transferred in one go. 157 * be transferred in one go.
158 * @quirks: i2c adapter quirks for limiting write/read transfer size and not
159 * allowing 0 length transfers.
158 */ 160 */
159struct tegra_i2c_hw_feature { 161struct tegra_i2c_hw_feature {
160 bool has_continue_xfer_support; 162 bool has_continue_xfer_support;
@@ -167,6 +169,7 @@ struct tegra_i2c_hw_feature {
167 bool has_multi_master_mode; 169 bool has_multi_master_mode;
168 bool has_slcg_override_reg; 170 bool has_slcg_override_reg;
169 bool has_mst_fifo; 171 bool has_mst_fifo;
172 const struct i2c_adapter_quirks *quirks;
170}; 173};
171 174
172/** 175/**
@@ -837,6 +840,10 @@ static const struct i2c_adapter_quirks tegra_i2c_quirks = {
837 .max_write_len = 4096, 840 .max_write_len = 4096,
838}; 841};
839 842
843static const struct i2c_adapter_quirks tegra194_i2c_quirks = {
844 .flags = I2C_AQ_NO_ZERO_LEN,
845};
846
840static const struct tegra_i2c_hw_feature tegra20_i2c_hw = { 847static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
841 .has_continue_xfer_support = false, 848 .has_continue_xfer_support = false,
842 .has_per_pkt_xfer_complete_irq = false, 849 .has_per_pkt_xfer_complete_irq = false,
@@ -848,6 +855,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
848 .has_multi_master_mode = false, 855 .has_multi_master_mode = false,
849 .has_slcg_override_reg = false, 856 .has_slcg_override_reg = false,
850 .has_mst_fifo = false, 857 .has_mst_fifo = false,
858 .quirks = &tegra_i2c_quirks,
851}; 859};
852 860
853static const struct tegra_i2c_hw_feature tegra30_i2c_hw = { 861static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -861,6 +869,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
861 .has_multi_master_mode = false, 869 .has_multi_master_mode = false,
862 .has_slcg_override_reg = false, 870 .has_slcg_override_reg = false,
863 .has_mst_fifo = false, 871 .has_mst_fifo = false,
872 .quirks = &tegra_i2c_quirks,
864}; 873};
865 874
866static const struct tegra_i2c_hw_feature tegra114_i2c_hw = { 875static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -874,6 +883,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
874 .has_multi_master_mode = false, 883 .has_multi_master_mode = false,
875 .has_slcg_override_reg = false, 884 .has_slcg_override_reg = false,
876 .has_mst_fifo = false, 885 .has_mst_fifo = false,
886 .quirks = &tegra_i2c_quirks,
877}; 887};
878 888
879static const struct tegra_i2c_hw_feature tegra124_i2c_hw = { 889static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
@@ -887,6 +897,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
887 .has_multi_master_mode = false, 897 .has_multi_master_mode = false,
888 .has_slcg_override_reg = true, 898 .has_slcg_override_reg = true,
889 .has_mst_fifo = false, 899 .has_mst_fifo = false,
900 .quirks = &tegra_i2c_quirks,
890}; 901};
891 902
892static const struct tegra_i2c_hw_feature tegra210_i2c_hw = { 903static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
@@ -900,6 +911,7 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
900 .has_multi_master_mode = true, 911 .has_multi_master_mode = true,
901 .has_slcg_override_reg = true, 912 .has_slcg_override_reg = true,
902 .has_mst_fifo = false, 913 .has_mst_fifo = false,
914 .quirks = &tegra_i2c_quirks,
903}; 915};
904 916
905static const struct tegra_i2c_hw_feature tegra194_i2c_hw = { 917static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
@@ -913,6 +925,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
913 .has_multi_master_mode = true, 925 .has_multi_master_mode = true,
914 .has_slcg_override_reg = true, 926 .has_slcg_override_reg = true,
915 .has_mst_fifo = true, 927 .has_mst_fifo = true,
928 .quirks = &tegra194_i2c_quirks,
916}; 929};
917 930
918/* Match table for of_platform binding */ 931/* Match table for of_platform binding */
@@ -964,7 +977,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
964 i2c_dev->base = base; 977 i2c_dev->base = base;
965 i2c_dev->div_clk = div_clk; 978 i2c_dev->div_clk = div_clk;
966 i2c_dev->adapter.algo = &tegra_i2c_algo; 979 i2c_dev->adapter.algo = &tegra_i2c_algo;
967 i2c_dev->adapter.quirks = &tegra_i2c_quirks;
968 i2c_dev->irq = irq; 980 i2c_dev->irq = irq;
969 i2c_dev->cont_id = pdev->id; 981 i2c_dev->cont_id = pdev->id;
970 i2c_dev->dev = &pdev->dev; 982 i2c_dev->dev = &pdev->dev;
@@ -980,6 +992,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
980 i2c_dev->hw = of_device_get_match_data(&pdev->dev); 992 i2c_dev->hw = of_device_get_match_data(&pdev->dev);
981 i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node, 993 i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
982 "nvidia,tegra20-i2c-dvc"); 994 "nvidia,tegra20-i2c-dvc");
995 i2c_dev->adapter.quirks = i2c_dev->hw->quirks;
983 init_completion(&i2c_dev->msg_complete); 996 init_completion(&i2c_dev->msg_complete);
984 spin_lock_init(&i2c_dev->xfer_lock); 997 spin_lock_init(&i2c_dev->xfer_lock);
985 998
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 1aca742fde4a..ccd76c71af09 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -470,9 +470,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
470 data_arg.data); 470 data_arg.data);
471 } 471 }
472 case I2C_RETRIES: 472 case I2C_RETRIES:
473 if (arg > INT_MAX)
474 return -EINVAL;
475
473 client->adapter->retries = arg; 476 client->adapter->retries = arg;
474 break; 477 break;
475 case I2C_TIMEOUT: 478 case I2C_TIMEOUT:
479 if (arg > INT_MAX)
480 return -EINVAL;
481
476 /* For historical reasons, user-space sets the timeout 482 /* For historical reasons, user-space sets the timeout
477 * value in units of 10 ms. 483 * value in units of 10 ms.
478 */ 484 */
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index c39f89d2deba..2dc628d4f1ae 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1828,7 +1828,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
1828 1828
1829 ret = i3c_master_retrieve_dev_info(newdev); 1829 ret = i3c_master_retrieve_dev_info(newdev);
1830 if (ret) 1830 if (ret)
1831 goto err_free_dev; 1831 goto err_detach_dev;
1832 1832
1833 olddev = i3c_master_search_i3c_dev_duplicate(newdev); 1833 olddev = i3c_master_search_i3c_dev_duplicate(newdev);
1834 if (olddev) { 1834 if (olddev) {
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index b532e2c9cf5c..bb03079fbade 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -419,12 +419,9 @@ static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
419 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 419 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
420} 420}
421 421
422static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, 422static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
423 struct dw_i3c_xfer *xfer) 423 struct dw_i3c_xfer *xfer)
424{ 424{
425 unsigned long flags;
426
427 spin_lock_irqsave(&master->xferqueue.lock, flags);
428 if (master->xferqueue.cur == xfer) { 425 if (master->xferqueue.cur == xfer) {
429 u32 status; 426 u32 status;
430 427
@@ -439,6 +436,15 @@ static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
439 } else { 436 } else {
440 list_del_init(&xfer->node); 437 list_del_init(&xfer->node);
441 } 438 }
439}
440
441static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
442 struct dw_i3c_xfer *xfer)
443{
444 unsigned long flags;
445
446 spin_lock_irqsave(&master->xferqueue.lock, flags);
447 dw_i3c_master_dequeue_xfer_locked(master, xfer);
442 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 448 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
443} 449}
444 450
@@ -494,7 +500,7 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
494 complete(&xfer->comp); 500 complete(&xfer->comp);
495 501
496 if (ret < 0) { 502 if (ret < 0) {
497 dw_i3c_master_dequeue_xfer(master, xfer); 503 dw_i3c_master_dequeue_xfer_locked(master, xfer);
498 writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME, 504 writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
499 master->regs + DEVICE_CTRL); 505 master->regs + DEVICE_CTRL);
500 } 506 }
@@ -901,9 +907,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
901 master->regs + 907 master->regs +
902 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 908 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
903 909
904 if (!old_dyn_addr)
905 return 0;
906
907 master->addrs[data->index] = dev->info.dyn_addr; 910 master->addrs[data->index] = dev->info.dyn_addr;
908 911
909 return 0; 912 return 0;
@@ -925,11 +928,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
925 return -ENOMEM; 928 return -ENOMEM;
926 929
927 data->index = pos; 930 data->index = pos;
928 master->addrs[pos] = dev->info.dyn_addr; 931 master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
929 master->free_pos &= ~BIT(pos); 932 master->free_pos &= ~BIT(pos);
930 i3c_dev_set_master_data(dev, data); 933 i3c_dev_set_master_data(dev, data);
931 934
932 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), 935 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]),
933 master->regs + 936 master->regs +
934 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 937 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
935 938
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index bbd79b8b1a80..8889a4fdb454 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
1556 return PTR_ERR(master->pclk); 1556 return PTR_ERR(master->pclk);
1557 1557
1558 master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); 1558 master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
1559 if (IS_ERR(master->pclk)) 1559 if (IS_ERR(master->sysclk))
1560 return PTR_ERR(master->pclk); 1560 return PTR_ERR(master->sysclk);
1561 1561
1562 irq = platform_get_irq(pdev, 0); 1562 irq = platform_get_irq(pdev, 0);
1563 if (irq < 0) 1563 if (irq < 0)
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index da58020a144e..33a28cde126c 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
235 235
236int ide_queue_sense_rq(ide_drive_t *drive, void *special) 236int ide_queue_sense_rq(ide_drive_t *drive, void *special)
237{ 237{
238 struct request *sense_rq = drive->sense_rq; 238 ide_hwif_t *hwif = drive->hwif;
239 struct request *sense_rq;
240 unsigned long flags;
241
242 spin_lock_irqsave(&hwif->lock, flags);
239 243
240 /* deferred failure from ide_prep_sense() */ 244 /* deferred failure from ide_prep_sense() */
241 if (!drive->sense_rq_armed) { 245 if (!drive->sense_rq_armed) {
242 printk(KERN_WARNING PFX "%s: error queuing a sense request\n", 246 printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
243 drive->name); 247 drive->name);
248 spin_unlock_irqrestore(&hwif->lock, flags);
244 return -ENOMEM; 249 return -ENOMEM;
245 } 250 }
246 251
252 sense_rq = drive->sense_rq;
247 ide_req(sense_rq)->special = special; 253 ide_req(sense_rq)->special = special;
248 drive->sense_rq_armed = false; 254 drive->sense_rq_armed = false;
249 255
250 drive->hwif->rq = NULL; 256 drive->hwif->rq = NULL;
251 257
252 ide_insert_request_head(drive, sense_rq); 258 ide_insert_request_head(drive, sense_rq);
259 spin_unlock_irqrestore(&hwif->lock, flags);
253 return 0; 260 return 0;
254} 261}
255EXPORT_SYMBOL_GPL(ide_queue_sense_rq); 262EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8445b484ae69..b137f27a34d5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
68 } 68 }
69 69
70 if (!blk_update_request(rq, error, nr_bytes)) { 70 if (!blk_update_request(rq, error, nr_bytes)) {
71 if (rq == drive->sense_rq) 71 if (rq == drive->sense_rq) {
72 drive->sense_rq = NULL; 72 drive->sense_rq = NULL;
73 drive->sense_rq_active = false;
74 }
73 75
74 __blk_mq_end_request(rq, error); 76 __blk_mq_end_request(rq, error);
75 return 0; 77 return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
451 blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3); 453 blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
452} 454}
453 455
454/* 456blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
455 * Issue a new request to a device. 457 bool local_requeue)
456 */
457blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
458 const struct blk_mq_queue_data *bd)
459{ 458{
460 ide_drive_t *drive = hctx->queue->queuedata; 459 ide_hwif_t *hwif = drive->hwif;
461 ide_hwif_t *hwif = drive->hwif;
462 struct ide_host *host = hwif->host; 460 struct ide_host *host = hwif->host;
463 struct request *rq = bd->rq;
464 ide_startstop_t startstop; 461 ide_startstop_t startstop;
465 462
466 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) { 463 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
474 if (ide_lock_host(host, hwif)) 471 if (ide_lock_host(host, hwif))
475 return BLK_STS_DEV_RESOURCE; 472 return BLK_STS_DEV_RESOURCE;
476 473
477 blk_mq_start_request(rq);
478
479 spin_lock_irq(&hwif->lock); 474 spin_lock_irq(&hwif->lock);
480 475
481 if (!ide_lock_port(hwif)) { 476 if (!ide_lock_port(hwif)) {
@@ -511,18 +506,6 @@ repeat:
511 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); 506 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
512 507
513 /* 508 /*
514 * we know that the queue isn't empty, but this can happen
515 * if ->prep_rq() decides to kill a request
516 */
517 if (!rq) {
518 rq = bd->rq;
519 if (!rq) {
520 ide_unlock_port(hwif);
521 goto out;
522 }
523 }
524
525 /*
526 * Sanity: don't accept a request that isn't a PM request 509 * Sanity: don't accept a request that isn't a PM request
527 * if we are currently power managed. This is very important as 510 * if we are currently power managed. This is very important as
528 * blk_stop_queue() doesn't prevent the blk_fetch_request() 511 * blk_stop_queue() doesn't prevent the blk_fetch_request()
@@ -560,9 +543,12 @@ repeat:
560 } 543 }
561 } else { 544 } else {
562plug_device: 545plug_device:
546 if (local_requeue)
547 list_add(&rq->queuelist, &drive->rq_list);
563 spin_unlock_irq(&hwif->lock); 548 spin_unlock_irq(&hwif->lock);
564 ide_unlock_host(host); 549 ide_unlock_host(host);
565 ide_requeue_and_plug(drive, rq); 550 if (!local_requeue)
551 ide_requeue_and_plug(drive, rq);
566 return BLK_STS_OK; 552 return BLK_STS_OK;
567 } 553 }
568 554
@@ -573,6 +559,26 @@ out:
573 return BLK_STS_OK; 559 return BLK_STS_OK;
574} 560}
575 561
562/*
563 * Issue a new request to a device.
564 */
565blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
566 const struct blk_mq_queue_data *bd)
567{
568 ide_drive_t *drive = hctx->queue->queuedata;
569 ide_hwif_t *hwif = drive->hwif;
570
571 spin_lock_irq(&hwif->lock);
572 if (drive->sense_rq_active) {
573 spin_unlock_irq(&hwif->lock);
574 return BLK_STS_DEV_RESOURCE;
575 }
576 spin_unlock_irq(&hwif->lock);
577
578 blk_mq_start_request(bd->rq);
579 return ide_issue_rq(drive, bd->rq, false);
580}
581
576static int drive_is_ready(ide_drive_t *drive) 582static int drive_is_ready(ide_drive_t *drive)
577{ 583{
578 ide_hwif_t *hwif = drive->hwif; 584 ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
893 899
894void ide_insert_request_head(ide_drive_t *drive, struct request *rq) 900void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
895{ 901{
896 ide_hwif_t *hwif = drive->hwif; 902 drive->sense_rq_active = true;
897 unsigned long flags;
898
899 spin_lock_irqsave(&hwif->lock, flags);
900 list_add_tail(&rq->queuelist, &drive->rq_list); 903 list_add_tail(&rq->queuelist, &drive->rq_list);
901 spin_unlock_irqrestore(&hwif->lock, flags);
902
903 kblockd_schedule_work(&drive->rq_work); 904 kblockd_schedule_work(&drive->rq_work);
904} 905}
905EXPORT_SYMBOL_GPL(ide_insert_request_head); 906EXPORT_SYMBOL_GPL(ide_insert_request_head);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 102aa3bc3e7f..8af7af6001eb 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
54 scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS; 54 scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
55 scsi_req(rq)->cmd_len = 1; 55 scsi_req(rq)->cmd_len = 1;
56 ide_req(rq)->type = ATA_PRIV_MISC; 56 ide_req(rq)->type = ATA_PRIV_MISC;
57 spin_lock_irq(&hwif->lock);
57 ide_insert_request_head(drive, rq); 58 ide_insert_request_head(drive, rq);
59 spin_unlock_irq(&hwif->lock);
58 60
59out: 61out:
60 return; 62 return;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 63627be0811a..5aeaca24a28f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
1159 ide_drive_t *drive = container_of(work, ide_drive_t, rq_work); 1159 ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
1160 ide_hwif_t *hwif = drive->hwif; 1160 ide_hwif_t *hwif = drive->hwif;
1161 struct request *rq; 1161 struct request *rq;
1162 blk_status_t ret;
1162 LIST_HEAD(list); 1163 LIST_HEAD(list);
1163 1164
1164 spin_lock_irq(&hwif->lock); 1165 blk_mq_quiesce_queue(drive->queue);
1165 if (!list_empty(&drive->rq_list))
1166 list_splice_init(&drive->rq_list, &list);
1167 spin_unlock_irq(&hwif->lock);
1168 1166
1169 while (!list_empty(&list)) { 1167 ret = BLK_STS_OK;
1170 rq = list_first_entry(&list, struct request, queuelist); 1168 spin_lock_irq(&hwif->lock);
1169 while (!list_empty(&drive->rq_list)) {
1170 rq = list_first_entry(&drive->rq_list, struct request, queuelist);
1171 list_del_init(&rq->queuelist); 1171 list_del_init(&rq->queuelist);
1172 blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL); 1172
1173 spin_unlock_irq(&hwif->lock);
1174 ret = ide_issue_rq(drive, rq, true);
1175 spin_lock_irq(&hwif->lock);
1173 } 1176 }
1177 spin_unlock_irq(&hwif->lock);
1178
1179 blk_mq_unquiesce_queue(drive->queue);
1180
1181 if (ret != BLK_STS_OK)
1182 kblockd_schedule_work(&drive->rq_work);
1174} 1183}
1175 1184
1176static const u8 ide_hwif_to_major[] = 1185static const u8 ide_hwif_to_major[] =
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 4c8c7a620d08..a5dc13576394 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
544 drive->proc = proc_mkdir(drive->name, parent); 544 drive->proc = proc_mkdir(drive->name, parent);
545 if (drive->proc) { 545 if (drive->proc) {
546 ide_add_proc_entries(drive->proc, generic_drive_entries, drive); 546 ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
547 proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR, 547 proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
548 drive->proc, &ide_settings_proc_fops, 548 drive->proc, &ide_settings_proc_fops,
549 drive); 549 drive);
550 } 550 }
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 031d568b4972..4e339cfd0c54 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -27,9 +27,18 @@
27#include <linux/iio/machine.h> 27#include <linux/iio/machine.h>
28#include <linux/iio/driver.h> 28#include <linux/iio/driver.h>
29 29
30#define AXP288_ADC_EN_MASK 0xF1 30/*
31#define AXP288_ADC_TS_PIN_GPADC 0xF2 31 * This mask enables all ADCs except for the battery temp-sensor (TS), that is
32#define AXP288_ADC_TS_PIN_ON 0xF3 32 * left as-is to avoid breaking charging on devices without a temp-sensor.
33 */
34#define AXP288_ADC_EN_MASK 0xF0
35#define AXP288_ADC_TS_ENABLE 0x01
36
37#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
38#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
39#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
40#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
41#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
33 42
34enum axp288_adc_id { 43enum axp288_adc_id {
35 AXP288_ADC_TS, 44 AXP288_ADC_TS,
@@ -44,6 +53,7 @@ enum axp288_adc_id {
44struct axp288_adc_info { 53struct axp288_adc_info {
45 int irq; 54 int irq;
46 struct regmap *regmap; 55 struct regmap *regmap;
56 bool ts_enabled;
47}; 57};
48 58
49static const struct iio_chan_spec axp288_adc_channels[] = { 59static const struct iio_chan_spec axp288_adc_channels[] = {
@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
115 return IIO_VAL_INT; 125 return IIO_VAL_INT;
116} 126}
117 127
118static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, 128/*
119 unsigned long address) 129 * The current-source used for the battery temp-sensor (TS) is shared
130 * with the GPADC. For proper fuel-gauge and charger operation the TS
131 * current-source needs to be permanently on. But to read the GPADC we
132 * need to temporary switch the TS current-source to ondemand, so that
133 * the GPADC can use it, otherwise we will always read an all 0 value.
134 */
135static int axp288_adc_set_ts(struct axp288_adc_info *info,
136 unsigned int mode, unsigned long address)
120{ 137{
121 int ret; 138 int ret;
122 139
123 /* channels other than GPADC do not need to switch TS pin */ 140 /* No need to switch the current-source if the TS pin is disabled */
141 if (!info->ts_enabled)
142 return 0;
143
144 /* Channels other than GPADC do not need the current source */
124 if (address != AXP288_GP_ADC_H) 145 if (address != AXP288_GP_ADC_H)
125 return 0; 146 return 0;
126 147
127 ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); 148 ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
149 AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
128 if (ret) 150 if (ret)
129 return ret; 151 return ret;
130 152
131 /* When switching to the GPADC pin give things some time to settle */ 153 /* When switching to the GPADC pin give things some time to settle */
132 if (mode == AXP288_ADC_TS_PIN_GPADC) 154 if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
133 usleep_range(6000, 10000); 155 usleep_range(6000, 10000);
134 156
135 return 0; 157 return 0;
@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
145 mutex_lock(&indio_dev->mlock); 167 mutex_lock(&indio_dev->mlock);
146 switch (mask) { 168 switch (mask) {
147 case IIO_CHAN_INFO_RAW: 169 case IIO_CHAN_INFO_RAW:
148 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, 170 if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
149 chan->address)) { 171 chan->address)) {
150 dev_err(&indio_dev->dev, "GPADC mode\n"); 172 dev_err(&indio_dev->dev, "GPADC mode\n");
151 ret = -EINVAL; 173 ret = -EINVAL;
152 break; 174 break;
153 } 175 }
154 ret = axp288_adc_read_channel(val, chan->address, info->regmap); 176 ret = axp288_adc_read_channel(val, chan->address, info->regmap);
155 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, 177 if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
156 chan->address)) 178 chan->address))
157 dev_err(&indio_dev->dev, "TS pin restore\n"); 179 dev_err(&indio_dev->dev, "TS pin restore\n");
158 break; 180 break;
@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
164 return ret; 186 return ret;
165} 187}
166 188
167static int axp288_adc_set_state(struct regmap *regmap) 189static int axp288_adc_initialize(struct axp288_adc_info *info)
168{ 190{
169 /* ADC should be always enabled for internal FG to function */ 191 int ret, adc_enable_val;
170 if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) 192
171 return -EIO; 193 /*
194 * Determine if the TS pin is enabled and set the TS current-source
195 * accordingly.
196 */
197 ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
198 if (ret)
199 return ret;
200
201 if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
202 info->ts_enabled = true;
203 ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
204 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
205 AXP288_ADC_TS_CURRENT_ON);
206 } else {
207 info->ts_enabled = false;
208 ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
209 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
210 AXP288_ADC_TS_CURRENT_OFF);
211 }
212 if (ret)
213 return ret;
172 214
173 return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); 215 /* Turn on the ADC for all channels except TS, leave TS as is */
216 return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
217 AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
174} 218}
175 219
176static const struct iio_info axp288_adc_iio_info = { 220static const struct iio_info axp288_adc_iio_info = {
@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
200 * Set ADC to enabled state at all time, including system suspend. 244 * Set ADC to enabled state at all time, including system suspend.
201 * otherwise internal fuel gauge functionality may be affected. 245 * otherwise internal fuel gauge functionality may be affected.
202 */ 246 */
203 ret = axp288_adc_set_state(axp20x->regmap); 247 ret = axp288_adc_initialize(info);
204 if (ret) { 248 if (ret) {
205 dev_err(&pdev->dev, "unable to enable ADC device\n"); 249 dev_err(&pdev->dev, "unable to enable ADC device\n");
206 return ret; 250 return ret;
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 184d686ebd99..8b4568edd5cb 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -41,6 +41,7 @@
41 41
42#define ADS8688_VREF_MV 4096 42#define ADS8688_VREF_MV 4096
43#define ADS8688_REALBITS 16 43#define ADS8688_REALBITS 16
44#define ADS8688_MAX_CHANNELS 8
44 45
45/* 46/*
46 * enum ads8688_range - ADS8688 reference voltage range 47 * enum ads8688_range - ADS8688 reference voltage range
@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
385{ 386{
386 struct iio_poll_func *pf = p; 387 struct iio_poll_func *pf = p;
387 struct iio_dev *indio_dev = pf->indio_dev; 388 struct iio_dev *indio_dev = pf->indio_dev;
388 u16 buffer[8]; 389 u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
389 int i, j = 0; 390 int i, j = 0;
390 391
391 for (i = 0; i < indio_dev->masklength; i++) { 392 for (i = 0; i < indio_dev->masklength; i++) {
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index cafb1dcadc48..9d984f2a8ba7 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -142,7 +142,10 @@ static void tiadc_step_config(struct iio_dev *indio_dev)
142 stepconfig |= STEPCONFIG_MODE_SWCNT; 142 stepconfig |= STEPCONFIG_MODE_SWCNT;
143 143
144 tiadc_writel(adc_dev, REG_STEPCONFIG(steps), 144 tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
145 stepconfig | STEPCONFIG_INP(chan)); 145 stepconfig | STEPCONFIG_INP(chan) |
146 STEPCONFIG_INM_ADCREFM |
147 STEPCONFIG_RFP_VREFP |
148 STEPCONFIG_RFM_VREFN);
146 149
147 if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) { 150 if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) {
148 dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n", 151 dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n",
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index a406ad31b096..3a20cb5d9bff 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
444 case IIO_CHAN_INFO_SCALE: 444 case IIO_CHAN_INFO_SCALE:
445 switch (chan->type) { 445 switch (chan->type) {
446 case IIO_TEMP: 446 case IIO_TEMP:
447 *val = 1; /* 0.01 */ 447 *val = 10;
448 *val2 = 100; 448 return IIO_VAL_INT;
449 break;
450 case IIO_PH: 449 case IIO_PH:
451 *val = 1; /* 0.001 */ 450 *val = 1; /* 0.001 */
452 *val2 = 1000; 451 *val2 = 1000;
@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
477 int val, int val2, long mask) 476 int val, int val2, long mask)
478{ 477{
479 struct atlas_data *data = iio_priv(indio_dev); 478 struct atlas_data *data = iio_priv(indio_dev);
480 __be32 reg = cpu_to_be32(val); 479 __be32 reg = cpu_to_be32(val / 10);
481 480
482 if (val2 != 0 || val < 0 || val > 20000) 481 if (val2 != 0 || val < 0 || val > 20000)
483 return -EINVAL; 482 return -EINVAL;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 63a7cc00bae0..84f077b2b90a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
494 id_priv->id.route.addr.dev_addr.transport = 494 id_priv->id.route.addr.dev_addr.transport =
495 rdma_node_get_transport(cma_dev->device->node_type); 495 rdma_node_get_transport(cma_dev->device->node_type);
496 list_add_tail(&id_priv->list, &cma_dev->id_list); 496 list_add_tail(&id_priv->list, &cma_dev->id_list);
497 rdma_restrack_kadd(&id_priv->res); 497 if (id_priv->res.kern_name)
498 rdma_restrack_kadd(&id_priv->res);
499 else
500 rdma_restrack_uadd(&id_priv->res);
498} 501}
499 502
500static void cma_attach_to_dev(struct rdma_id_private *id_priv, 503static void cma_attach_to_dev(struct rdma_id_private *id_priv,
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 3cd830d52967..616734313f0c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -267,7 +267,6 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
267#endif 267#endif
268 268
269struct ib_device *ib_device_get_by_index(u32 ifindex); 269struct ib_device *ib_device_get_by_index(u32 ifindex);
270void ib_device_put(struct ib_device *device);
271/* RDMA device netlink */ 270/* RDMA device netlink */
272void nldev_init(void); 271void nldev_init(void);
273void nldev_exit(void); 272void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 8872453e26c0..238ec42778ef 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -156,19 +156,26 @@ struct ib_device *ib_device_get_by_index(u32 index)
156 down_read(&lists_rwsem); 156 down_read(&lists_rwsem);
157 device = __ib_device_get_by_index(index); 157 device = __ib_device_get_by_index(index);
158 if (device) { 158 if (device) {
159 /* Do not return a device if unregistration has started. */ 159 if (!ib_device_try_get(device))
160 if (!refcount_inc_not_zero(&device->refcount))
161 device = NULL; 160 device = NULL;
162 } 161 }
163 up_read(&lists_rwsem); 162 up_read(&lists_rwsem);
164 return device; 163 return device;
165} 164}
166 165
166/**
167 * ib_device_put - Release IB device reference
168 * @device: device whose reference to be released
169 *
170 * ib_device_put() releases reference to the IB device to allow it to be
171 * unregistered and eventually free.
172 */
167void ib_device_put(struct ib_device *device) 173void ib_device_put(struct ib_device *device)
168{ 174{
169 if (refcount_dec_and_test(&device->refcount)) 175 if (refcount_dec_and_test(&device->refcount))
170 complete(&device->unreg_completion); 176 complete(&device->unreg_completion);
171} 177}
178EXPORT_SYMBOL(ib_device_put);
172 179
173static struct ib_device *__ib_device_get_by_name(const char *name) 180static struct ib_device *__ib_device_get_by_name(const char *name)
174{ 181{
@@ -303,7 +310,6 @@ struct ib_device *ib_alloc_device(size_t size)
303 rwlock_init(&device->client_data_lock); 310 rwlock_init(&device->client_data_lock);
304 INIT_LIST_HEAD(&device->client_data_list); 311 INIT_LIST_HEAD(&device->client_data_list);
305 INIT_LIST_HEAD(&device->port_list); 312 INIT_LIST_HEAD(&device->port_list);
306 refcount_set(&device->refcount, 1);
307 init_completion(&device->unreg_completion); 313 init_completion(&device->unreg_completion);
308 314
309 return device; 315 return device;
@@ -620,6 +626,7 @@ int ib_register_device(struct ib_device *device, const char *name,
620 goto cg_cleanup; 626 goto cg_cleanup;
621 } 627 }
622 628
629 refcount_set(&device->refcount, 1);
623 device->reg_state = IB_DEV_REGISTERED; 630 device->reg_state = IB_DEV_REGISTERED;
624 631
625 list_for_each_entry(client, &client_list, list) 632 list_for_each_entry(client, &client_list, list)
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index e600fc23ae62..3c97a8b6bf1e 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
584 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 584 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
585 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) 585 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
586 goto err; 586 goto err;
587 if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
588 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
589 pd->unsafe_global_rkey))
590 goto err;
591 587
592 if (fill_res_name_pid(msg, res)) 588 if (fill_res_name_pid(msg, res))
593 goto err; 589 goto err;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index be6b8e1257d0..69f8db66925e 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
106 enum uverbs_obj_access access, 106 enum uverbs_obj_access access,
107 bool commit); 107 bool commit);
108 108
109int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
110
109void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); 111void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
110void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); 112void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
111 113
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index a4ec43093cb3..acb882f279cb 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -352,6 +352,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
352 umem->writable = 1; 352 umem->writable = 1;
353 umem->is_odp = 1; 353 umem->is_odp = 1;
354 odp_data->per_mm = per_mm; 354 odp_data->per_mm = per_mm;
355 umem->owning_mm = per_mm->mm;
356 mmgrab(umem->owning_mm);
355 357
356 mutex_init(&odp_data->umem_mutex); 358 mutex_init(&odp_data->umem_mutex);
357 init_completion(&odp_data->notifier_completion); 359 init_completion(&odp_data->notifier_completion);
@@ -384,6 +386,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
384out_page_list: 386out_page_list:
385 vfree(odp_data->page_list); 387 vfree(odp_data->page_list);
386out_odp_data: 388out_odp_data:
389 mmdrop(umem->owning_mm);
387 kfree(odp_data); 390 kfree(odp_data);
388 return ERR_PTR(ret); 391 return ERR_PTR(ret);
389} 392}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6b12cc5f97b2..3317300ab036 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
60{ 60{
61 int ret; 61 int ret;
62 62
63 if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
64 return uverbs_copy_to_struct_or_zero(
65 attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
66
63 if (copy_to_user(attrs->ucore.outbuf, resp, 67 if (copy_to_user(attrs->ucore.outbuf, resp,
64 min(attrs->ucore.outlen, resp_len))) 68 min(attrs->ucore.outlen, resp_len)))
65 return -EFAULT; 69 return -EFAULT;
@@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
1181 goto out_put; 1185 goto out_put;
1182 } 1186 }
1183 1187
1188 if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
1189 ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
1190
1184 ret = 0; 1191 ret = 0;
1185 1192
1186out_put: 1193out_put:
@@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
2012 return -ENOMEM; 2019 return -ENOMEM;
2013 2020
2014 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); 2021 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2015 if (!qp) 2022 if (!qp) {
2023 ret = -EINVAL;
2016 goto out; 2024 goto out;
2025 }
2017 2026
2018 is_ud = qp->qp_type == IB_QPT_UD; 2027 is_ud = qp->qp_type == IB_QPT_UD;
2019 sg_ind = 0; 2028 sg_ind = 0;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 8c81ff698052..0ca04d224015 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
144 0, uattr->len - len); 144 0, uattr->len - len);
145} 145}
146 146
147static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
148 const struct uverbs_attr *attr)
149{
150 struct bundle_priv *pbundle =
151 container_of(bundle, struct bundle_priv, bundle);
152 u16 flags;
153
154 flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
155 UVERBS_ATTR_F_VALID_OUTPUT;
156 if (put_user(flags,
157 &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
158 return -EFAULT;
159 return 0;
160}
161
147static int uverbs_process_idrs_array(struct bundle_priv *pbundle, 162static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
148 const struct uverbs_api_attr *attr_uapi, 163 const struct uverbs_api_attr *attr_uapi,
149 struct uverbs_objs_arr_attr *attr, 164 struct uverbs_objs_arr_attr *attr,
@@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
456 } 471 }
457 472
458 /* 473 /*
474 * Until the drivers are revised to use the bundle directly we have to
475 * assume that the driver wrote to its UHW_OUT and flag userspace
476 * appropriately.
477 */
478 if (!ret && pbundle->method_elm->has_udata) {
479 const struct uverbs_attr *attr =
480 uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
481
482 if (!IS_ERR(attr))
483 ret = uverbs_set_output(&pbundle->bundle, attr);
484 }
485
486 /*
459 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can 487 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
460 * not invoke the method because the request is not supported. No 488 * not invoke the method because the request is not supported. No
461 * other cases should return this code. 489 * other cases should return this code.
@@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
706int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, 734int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
707 const void *from, size_t size) 735 const void *from, size_t size)
708{ 736{
709 struct bundle_priv *pbundle =
710 container_of(bundle, struct bundle_priv, bundle);
711 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 737 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
712 u16 flags;
713 size_t min_size; 738 size_t min_size;
714 739
715 if (IS_ERR(attr)) 740 if (IS_ERR(attr))
@@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
719 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) 744 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
720 return -EFAULT; 745 return -EFAULT;
721 746
722 flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | 747 return uverbs_set_output(bundle, attr);
723 UVERBS_ATTR_F_VALID_OUTPUT;
724 if (put_user(flags,
725 &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
726 return -EFAULT;
727
728 return 0;
729} 748}
730EXPORT_SYMBOL(uverbs_copy_to); 749EXPORT_SYMBOL(uverbs_copy_to);
731 750
751
752/*
753 * This is only used if the caller has directly used copy_to_use to write the
754 * data. It signals to user space that the buffer is filled in.
755 */
756int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
757{
758 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
759
760 if (IS_ERR(attr))
761 return PTR_ERR(attr);
762
763 return uverbs_set_output(bundle, attr);
764}
765
732int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, 766int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
733 size_t idx, s64 lower_bound, u64 upper_bound, 767 size_t idx, s64 lower_bound, u64 upper_bound,
734 s64 *def_val) 768 s64 *def_val)
@@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
757{ 791{
758 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 792 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
759 793
760 if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), 794 if (size < attr->ptr_attr.len) {
761 attr->ptr_attr.len)) 795 if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
762 return -EFAULT; 796 attr->ptr_attr.len - size))
797 return -EFAULT;
798 }
763 return uverbs_copy_to(bundle, idx, from, size); 799 return uverbs_copy_to(bundle, idx, from, size);
764} 800}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb0007aa0c27..5f366838b7ff 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -204,6 +204,9 @@ void ib_uverbs_release_file(struct kref *ref)
204 if (atomic_dec_and_test(&file->device->refcount)) 204 if (atomic_dec_and_test(&file->device->refcount))
205 ib_uverbs_comp_dev(file->device); 205 ib_uverbs_comp_dev(file->device);
206 206
207 if (file->async_file)
208 kref_put(&file->async_file->ref,
209 ib_uverbs_release_async_event_file);
207 put_device(&file->device->dev); 210 put_device(&file->device->dev);
208 kfree(file); 211 kfree(file);
209} 212}
@@ -690,6 +693,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
690 693
691 buf += sizeof(hdr); 694 buf += sizeof(hdr);
692 695
696 memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
693 bundle.ufile = file; 697 bundle.ufile = file;
694 if (!method_elm->is_ex) { 698 if (!method_elm->is_ex) {
695 size_t in_len = hdr.in_words * 4 - sizeof(hdr); 699 size_t in_len = hdr.in_words * 4 - sizeof(hdr);
@@ -963,11 +967,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
963 967
964 /* Get an arbitrary mm pointer that hasn't been cleaned yet */ 968 /* Get an arbitrary mm pointer that hasn't been cleaned yet */
965 mutex_lock(&ufile->umap_lock); 969 mutex_lock(&ufile->umap_lock);
966 if (!list_empty(&ufile->umaps)) { 970 while (!list_empty(&ufile->umaps)) {
967 mm = list_first_entry(&ufile->umaps, 971 int ret;
968 struct rdma_umap_priv, list) 972
969 ->vma->vm_mm; 973 priv = list_first_entry(&ufile->umaps,
970 mmget(mm); 974 struct rdma_umap_priv, list);
975 mm = priv->vma->vm_mm;
976 ret = mmget_not_zero(mm);
977 if (!ret) {
978 list_del_init(&priv->list);
979 mm = NULL;
980 continue;
981 }
982 break;
971 } 983 }
972 mutex_unlock(&ufile->umap_lock); 984 mutex_unlock(&ufile->umap_lock);
973 if (!mm) 985 if (!mm)
@@ -1095,10 +1107,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
1095 list_del_init(&file->list); 1107 list_del_init(&file->list);
1096 mutex_unlock(&file->device->lists_mutex); 1108 mutex_unlock(&file->device->lists_mutex);
1097 1109
1098 if (file->async_file)
1099 kref_put(&file->async_file->ref,
1100 ib_uverbs_release_async_event_file);
1101
1102 kref_put(&file->ref, ib_uverbs_release_file); 1110 kref_put(&file->ref, ib_uverbs_release_file);
1103 1111
1104 return 0; 1112 return 0;
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 5030ec480370..2a3f2f01028d 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -168,12 +168,18 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr,
168static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( 168static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
169 struct uverbs_attr_bundle *attrs) 169 struct uverbs_attr_bundle *attrs)
170{ 170{
171 struct ib_device *ib_dev = attrs->ufile->device->ib_dev; 171 struct ib_device *ib_dev;
172 struct ib_port_attr attr = {}; 172 struct ib_port_attr attr = {};
173 struct ib_uverbs_query_port_resp_ex resp = {}; 173 struct ib_uverbs_query_port_resp_ex resp = {};
174 struct ib_ucontext *ucontext;
174 int ret; 175 int ret;
175 u8 port_num; 176 u8 port_num;
176 177
178 ucontext = ib_uverbs_get_ucontext(attrs);
179 if (IS_ERR(ucontext))
180 return PTR_ERR(ucontext);
181 ib_dev = ucontext->device;
182
177 /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */ 183 /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
178 if (!ib_dev->ops.query_port) 184 if (!ib_dev->ops.query_port)
179 return -EOPNOTSUPP; 185 return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 326805461265..19551aa43850 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -766,8 +766,8 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
766 return NULL; 766 return NULL;
767 767
768 sbuf->size = size; 768 sbuf->size = size;
769 sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, 769 sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
770 &sbuf->dma_addr, GFP_ATOMIC); 770 &sbuf->dma_addr, GFP_ATOMIC);
771 if (!sbuf->sb) 771 if (!sbuf->sb)
772 goto bail; 772 goto bail;
773 773
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 59eeac55626f..57d4951679cb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -105,10 +105,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
105 105
106 if (!sghead) { 106 if (!sghead) {
107 for (i = 0; i < pages; i++) { 107 for (i = 0; i < pages; i++) {
108 pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev, 108 pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
109 pbl->pg_size, 109 pbl->pg_size,
110 &pbl->pg_map_arr[i], 110 &pbl->pg_map_arr[i],
111 GFP_KERNEL); 111 GFP_KERNEL);
112 if (!pbl->pg_arr[i]) 112 if (!pbl->pg_arr[i])
113 goto fail; 113 goto fail;
114 pbl->pg_count++; 114 pbl->pg_count++;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index df4f7a3f043d..8ac72ac7cbac 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -291,9 +291,9 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
291 if (!wq->sq) 291 if (!wq->sq)
292 goto err3; 292 goto err3;
293 293
294 wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev), 294 wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
295 depth * sizeof(union t3_wr), 295 depth * sizeof(union t3_wr),
296 &(wq->dma_addr), GFP_KERNEL); 296 &(wq->dma_addr), GFP_KERNEL);
297 if (!wq->queue) 297 if (!wq->queue)
298 goto err4; 298 goto err4;
299 299
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index c13c0ba30f63..d499cd61c0e8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
783static int c4iw_rdev_open(struct c4iw_rdev *rdev) 783static int c4iw_rdev_open(struct c4iw_rdev *rdev)
784{ 784{
785 int err; 785 int err;
786 unsigned int factor;
786 787
787 c4iw_init_dev_ucontext(rdev, &rdev->uctx); 788 c4iw_init_dev_ucontext(rdev, &rdev->uctx);
788 789
@@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
806 return -EINVAL; 807 return -EINVAL;
807 } 808 }
808 809
809 rdev->qpmask = rdev->lldi.udb_density - 1; 810 /* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
810 rdev->cqmask = rdev->lldi.ucq_density - 1; 811 if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
812 pr_err("%s: unsupported sge host page size %u\n",
813 pci_name(rdev->lldi.pdev),
814 rdev->lldi.sge_host_page_size);
815 return -EINVAL;
816 }
817
818 factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
819 rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
820 rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
821
811 pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n", 822 pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
812 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, 823 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
813 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), 824 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 981ff5cfb5d1..504cf525508f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2564 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> 2564 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
2565 T4_RQT_ENTRY_SHIFT; 2565 T4_RQT_ENTRY_SHIFT;
2566 2566
2567 wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev, 2567 wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
2568 wq->memsize, &wq->dma_addr, 2568 &wq->dma_addr, GFP_KERNEL);
2569 GFP_KERNEL);
2570 if (!wq->queue) 2569 if (!wq->queue)
2571 goto err_free_rqtpool; 2570 goto err_free_rqtpool;
2572 2571
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c22ebc774a6a..f9a7e9d29c8b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
488 vmf = 1; 488 vmf = 1;
489 break; 489 break;
490 case STATUS: 490 case STATUS:
491 if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) { 491 if (flags & VM_WRITE) {
492 ret = -EPERM; 492 ret = -EPERM;
493 goto done; 493 goto done;
494 } 494 }
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 09044905284f..7835eb52e7c5 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
899 goto done; 899 goto done;
900 900
901 /* allocate dummy tail memory for all receive contexts */ 901 /* allocate dummy tail memory for all receive contexts */
902 dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent( 902 dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
903 &dd->pcidev->dev, sizeof(u64), 903 sizeof(u64),
904 &dd->rcvhdrtail_dummy_dma, 904 &dd->rcvhdrtail_dummy_dma,
905 GFP_KERNEL); 905 GFP_KERNEL);
906 906
907 if (!dd->rcvhdrtail_dummy_kvaddr) { 907 if (!dd->rcvhdrtail_dummy_kvaddr) {
908 dd_dev_err(dd, "cannot allocate dummy tail memory\n"); 908 dd_dev_err(dd, "cannot allocate dummy tail memory\n");
@@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1863 gfp_flags = GFP_KERNEL; 1863 gfp_flags = GFP_KERNEL;
1864 else 1864 else
1865 gfp_flags = GFP_USER; 1865 gfp_flags = GFP_USER;
1866 rcd->rcvhdrq = dma_zalloc_coherent( 1866 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
1867 &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, 1867 &rcd->rcvhdrq_dma,
1868 gfp_flags | __GFP_COMP); 1868 gfp_flags | __GFP_COMP);
1869 1869
1870 if (!rcd->rcvhdrq) { 1870 if (!rcd->rcvhdrq) {
1871 dd_dev_err(dd, 1871 dd_dev_err(dd,
@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1876 1876
1877 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || 1877 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
1878 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { 1878 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
1879 rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( 1879 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
1880 &dd->pcidev->dev, PAGE_SIZE, 1880 PAGE_SIZE,
1881 &rcd->rcvhdrqtailaddr_dma, gfp_flags); 1881 &rcd->rcvhdrqtailaddr_dma,
1882 gfp_flags);
1882 if (!rcd->rcvhdrtail_kvaddr) 1883 if (!rcd->rcvhdrtail_kvaddr)
1883 goto bail_free; 1884 goto bail_free;
1884 } 1885 }
@@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1974 while (alloced_bytes < rcd->egrbufs.size && 1975 while (alloced_bytes < rcd->egrbufs.size &&
1975 rcd->egrbufs.alloced < rcd->egrbufs.count) { 1976 rcd->egrbufs.alloced < rcd->egrbufs.count) {
1976 rcd->egrbufs.buffers[idx].addr = 1977 rcd->egrbufs.buffers[idx].addr =
1977 dma_zalloc_coherent(&dd->pcidev->dev, 1978 dma_alloc_coherent(&dd->pcidev->dev,
1978 rcd->egrbufs.rcvtid_size, 1979 rcd->egrbufs.rcvtid_size,
1979 &rcd->egrbufs.buffers[idx].dma, 1980 &rcd->egrbufs.buffers[idx].dma,
1980 gfp_flags); 1981 gfp_flags);
1981 if (rcd->egrbufs.buffers[idx].addr) { 1982 if (rcd->egrbufs.buffers[idx].addr) {
1982 rcd->egrbufs.buffers[idx].len = 1983 rcd->egrbufs.buffers[idx].len =
1983 rcd->egrbufs.rcvtid_size; 1984 rcd->egrbufs.rcvtid_size;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index dd5a5c030066..04126d7e318d 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd)
2098 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); 2098 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2099 2099
2100 set_dev_node(&dd->pcidev->dev, i); 2100 set_dev_node(&dd->pcidev->dev, i);
2101 dd->cr_base[i].va = dma_zalloc_coherent( 2101 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
2102 &dd->pcidev->dev, 2102 bytes,
2103 bytes, 2103 &dd->cr_base[i].dma,
2104 &dd->cr_base[i].dma, 2104 GFP_KERNEL);
2105 GFP_KERNEL);
2106 if (!dd->cr_base[i].va) { 2105 if (!dd->cr_base[i].va) {
2107 set_dev_node(&dd->pcidev->dev, dd->node); 2106 set_dev_node(&dd->pcidev->dev, dd->node);
2108 dd_dev_err(dd, 2107 dd_dev_err(dd,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index b84356e1a4c1..96897a91fb0a 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
1453 timer_setup(&sde->err_progress_check_timer, 1453 timer_setup(&sde->err_progress_check_timer,
1454 sdma_err_progress_check, 0); 1454 sdma_err_progress_check, 0);
1455 1455
1456 sde->descq = dma_zalloc_coherent( 1456 sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
1457 &dd->pcidev->dev, 1457 descq_cnt * sizeof(u64[2]),
1458 descq_cnt * sizeof(u64[2]), 1458 &sde->descq_phys, GFP_KERNEL);
1459 &sde->descq_phys,
1460 GFP_KERNEL
1461 );
1462 if (!sde->descq) 1459 if (!sde->descq)
1463 goto bail; 1460 goto bail;
1464 sde->tx_ring = 1461 sde->tx_ring =
@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
1471 1468
1472 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; 1469 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1473 /* Allocate memory for DMA of head registers to memory */ 1470 /* Allocate memory for DMA of head registers to memory */
1474 dd->sdma_heads_dma = dma_zalloc_coherent( 1471 dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
1475 &dd->pcidev->dev, 1472 dd->sdma_heads_size,
1476 dd->sdma_heads_size, 1473 &dd->sdma_heads_phys,
1477 &dd->sdma_heads_phys, 1474 GFP_KERNEL);
1478 GFP_KERNEL
1479 );
1480 if (!dd->sdma_heads_dma) { 1475 if (!dd->sdma_heads_dma) {
1481 dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); 1476 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1482 goto bail; 1477 goto bail;
1483 } 1478 }
1484 1479
1485 /* Allocate memory for pad */ 1480 /* Allocate memory for pad */
1486 dd->sdma_pad_dma = dma_zalloc_coherent( 1481 dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
1487 &dd->pcidev->dev, 1482 &dd->sdma_pad_phys, GFP_KERNEL);
1488 sizeof(u32),
1489 &dd->sdma_pad_phys,
1490 GFP_KERNEL
1491 );
1492 if (!dd->sdma_pad_dma) { 1483 if (!dd->sdma_pad_dma) {
1493 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); 1484 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1494 goto bail; 1485 goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 88242fe95eaa..bf96067876c9 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -987,7 +987,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
987 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 987 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
988 wc.ex.imm_data = packet->ohdr->u.ud.imm_data; 988 wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
989 wc.wc_flags = IB_WC_WITH_IMM; 989 wc.wc_flags = IB_WC_WITH_IMM;
990 tlen -= sizeof(u32);
991 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 990 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
992 wc.ex.imm_data = 0; 991 wc.ex.imm_data = 0;
993 wc.wc_flags = 0; 992 wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 6300033a448f..dac058d3df53 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
197 buf->npages = 1 << order; 197 buf->npages = 1 << order;
198 buf->page_shift = page_shift; 198 buf->page_shift = page_shift;
199 /* MTT PA must be recorded in 4k alignment, t is 4k aligned */ 199 /* MTT PA must be recorded in 4k alignment, t is 4k aligned */
200 buf->direct.buf = dma_zalloc_coherent(dev, 200 buf->direct.buf = dma_alloc_coherent(dev, size, &t,
201 size, &t, GFP_KERNEL); 201 GFP_KERNEL);
202 if (!buf->direct.buf) 202 if (!buf->direct.buf)
203 return -ENOMEM; 203 return -ENOMEM;
204 204
@@ -219,9 +219,10 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
219 return -ENOMEM; 219 return -ENOMEM;
220 220
221 for (i = 0; i < buf->nbufs; ++i) { 221 for (i = 0; i < buf->nbufs; ++i) {
222 buf->page_list[i].buf = dma_zalloc_coherent(dev, 222 buf->page_list[i].buf = dma_alloc_coherent(dev,
223 page_size, &t, 223 page_size,
224 GFP_KERNEL); 224 &t,
225 GFP_KERNEL);
225 226
226 if (!buf->page_list[i].buf) 227 if (!buf->page_list[i].buf)
227 goto err_free; 228 goto err_free;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 3a669451cf86..543fa1504cd3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5091 eqe_alloc = i * (buf_chk_sz / eq->eqe_size); 5091 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5092 size = (eq->entries - eqe_alloc) * eq->eqe_size; 5092 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5093 } 5093 }
5094 eq->buf[i] = dma_zalloc_coherent(dev, size, 5094 eq->buf[i] = dma_alloc_coherent(dev, size,
5095 &(eq->buf_dma[i]), 5095 &(eq->buf_dma[i]),
5096 GFP_KERNEL); 5096 GFP_KERNEL);
5097 if (!eq->buf[i]) 5097 if (!eq->buf[i])
@@ -5126,9 +5126,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5126 size = (eq->entries - eqe_alloc) 5126 size = (eq->entries - eqe_alloc)
5127 * eq->eqe_size; 5127 * eq->eqe_size;
5128 } 5128 }
5129 eq->buf[idx] = dma_zalloc_coherent(dev, size, 5129 eq->buf[idx] = dma_alloc_coherent(dev, size,
5130 &(eq->buf_dma[idx]), 5130 &(eq->buf_dma[idx]),
5131 GFP_KERNEL); 5131 GFP_KERNEL);
5132 if (!eq->buf[idx]) 5132 if (!eq->buf[idx])
5133 goto err_dma_alloc_buf; 5133 goto err_dma_alloc_buf;
5134 5134
@@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5241 goto free_cmd_mbox; 5241 goto free_cmd_mbox;
5242 } 5242 }
5243 5243
5244 eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz, 5244 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
5245 &(eq->buf_list->map), 5245 &(eq->buf_list->map),
5246 GFP_KERNEL); 5246 GFP_KERNEL);
5247 if (!eq->buf_list->buf) { 5247 if (!eq->buf_list->buf) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 960b1946c365..12deacf442cf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -210,6 +210,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
210 struct ib_udata *udata) 210 struct ib_udata *udata)
211{ 211{
212 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); 212 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
213 struct hns_roce_ib_create_srq_resp resp = {};
213 struct hns_roce_srq *srq; 214 struct hns_roce_srq *srq;
214 int srq_desc_size; 215 int srq_desc_size;
215 int srq_buf_size; 216 int srq_buf_size;
@@ -378,16 +379,21 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
378 379
379 srq->event = hns_roce_ib_srq_event; 380 srq->event = hns_roce_ib_srq_event;
380 srq->ibsrq.ext.xrc.srq_num = srq->srqn; 381 srq->ibsrq.ext.xrc.srq_num = srq->srqn;
382 resp.srqn = srq->srqn;
381 383
382 if (udata) { 384 if (udata) {
383 if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { 385 if (ib_copy_to_udata(udata, &resp,
386 min(udata->outlen, sizeof(resp)))) {
384 ret = -EFAULT; 387 ret = -EFAULT;
385 goto err_wrid; 388 goto err_srqc_alloc;
386 } 389 }
387 } 390 }
388 391
389 return &srq->ibsrq; 392 return &srq->ibsrq;
390 393
394err_srqc_alloc:
395 hns_roce_srq_free(hr_dev, srq);
396
391err_wrid: 397err_wrid:
392 kvfree(srq->wrid); 398 kvfree(srq->wrid);
393 399
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index a9ea966877f2..59e978141ad4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -745,8 +745,8 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
745 if (!mem) 745 if (!mem)
746 return I40IW_ERR_PARAM; 746 return I40IW_ERR_PARAM;
747 mem->size = ALIGN(size, alignment); 747 mem->size = ALIGN(size, alignment);
748 mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size, 748 mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
749 (dma_addr_t *)&mem->pa, GFP_KERNEL); 749 (dma_addr_t *)&mem->pa, GFP_KERNEL);
750 if (!mem->va) 750 if (!mem->va)
751 return I40IW_ERR_NO_MEMORY; 751 return I40IW_ERR_NO_MEMORY;
752 return 0; 752 return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 25439da8976c..936ee1314bcd 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1411 1411
1412 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); 1412 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1413 if (sqp->tx_ring[wire_tx_ix].ah) 1413 if (sqp->tx_ring[wire_tx_ix].ah)
1414 rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); 1414 mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
1415 sqp->tx_ring[wire_tx_ix].ah = ah; 1415 sqp->tx_ring[wire_tx_ix].ah = ah;
1416 ib_dma_sync_single_for_cpu(&dev->ib_dev, 1416 ib_dma_sync_single_for_cpu(&dev->ib_dev,
1417 sqp->tx_ring[wire_tx_ix].buf.map, 1417 sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1902,7 +1902,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1902 if (wc.status == IB_WC_SUCCESS) { 1902 if (wc.status == IB_WC_SUCCESS) {
1903 switch (wc.opcode) { 1903 switch (wc.opcode) {
1904 case IB_WC_SEND: 1904 case IB_WC_SEND:
1905 rdma_destroy_ah(sqp->tx_ring[wc.wr_id & 1905 mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1906 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); 1906 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
1907 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah 1907 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1908 = NULL; 1908 = NULL;
@@ -1931,7 +1931,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1931 " status = %d, wrid = 0x%llx\n", 1931 " status = %d, wrid = 0x%llx\n",
1932 ctx->slave, wc.status, wc.wr_id); 1932 ctx->slave, wc.status, wc.wr_id);
1933 if (!MLX4_TUN_IS_RECV(wc.wr_id)) { 1933 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1934 rdma_destroy_ah(sqp->tx_ring[wc.wr_id & 1934 mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1935 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); 1935 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
1936 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah 1936 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1937 = NULL; 1937 = NULL;
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index e8a1e4498e3f..798591a18484 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -630,8 +630,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
630 UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), 630 UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
631 UAPI_DEF_CHAIN_OBJ_TREE( 631 UAPI_DEF_CHAIN_OBJ_TREE(
632 UVERBS_OBJECT_FLOW, 632 UVERBS_OBJECT_FLOW,
633 &mlx5_ib_fs, 633 &mlx5_ib_fs),
634 UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
635 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, 634 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
636 &mlx5_ib_flow_actions), 635 &mlx5_ib_flow_actions),
637 {}, 636 {},
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 01e0f6200631..4ee32964e1dd 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1595,10 +1595,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
1595 struct prefetch_mr_work *w = 1595 struct prefetch_mr_work *w =
1596 container_of(work, struct prefetch_mr_work, work); 1596 container_of(work, struct prefetch_mr_work, work);
1597 1597
1598 if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED) 1598 if (ib_device_try_get(&w->dev->ib_dev)) {
1599 mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list, 1599 mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list,
1600 w->num_sge); 1600 w->num_sge);
1601 1601 ib_device_put(&w->dev->ib_dev);
1602 }
1603 put_device(&w->dev->ib_dev.dev);
1602 kfree(w); 1604 kfree(w);
1603} 1605}
1604 1606
@@ -1617,15 +1619,13 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1617 return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list, 1619 return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list,
1618 num_sge); 1620 num_sge);
1619 1621
1620 if (dev->ib_dev.reg_state != IB_DEV_REGISTERED)
1621 return -ENODEV;
1622
1623 work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL); 1622 work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
1624 if (!work) 1623 if (!work)
1625 return -ENOMEM; 1624 return -ENOMEM;
1626 1625
1627 memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge)); 1626 memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
1628 1627
1628 get_device(&dev->ib_dev.dev);
1629 work->dev = dev; 1629 work->dev = dev;
1630 work->pf_flags = pf_flags; 1630 work->pf_flags = pf_flags;
1631 work->num_sge = num_sge; 1631 work->num_sge = num_sge;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index dd2ae640bc84..7db778d96ef5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1912,14 +1912,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1912 } 1912 }
1913 1913
1914 if (!check_flags_mask(ucmd.flags, 1914 if (!check_flags_mask(ucmd.flags,
1915 MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
1916 MLX5_QP_FLAG_BFREG_INDEX |
1917 MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
1918 MLX5_QP_FLAG_SCATTER_CQE |
1915 MLX5_QP_FLAG_SIGNATURE | 1919 MLX5_QP_FLAG_SIGNATURE |
1916 MLX5_QP_FLAG_SCATTER_CQE | 1920 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
1917 MLX5_QP_FLAG_TUNNEL_OFFLOADS | 1921 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1918 MLX5_QP_FLAG_BFREG_INDEX | 1922 MLX5_QP_FLAG_TUNNEL_OFFLOADS |
1919 MLX5_QP_FLAG_TYPE_DCT | 1923 MLX5_QP_FLAG_TYPE_DCI |
1920 MLX5_QP_FLAG_TYPE_DCI | 1924 MLX5_QP_FLAG_TYPE_DCT))
1921 MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
1922 MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE))
1923 return -EINVAL; 1925 return -EINVAL;
1924 1926
1925 err = get_qp_user_index(to_mucontext(pd->uobject->context), 1927 err = get_qp_user_index(to_mucontext(pd->uobject->context),
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index cc9c0c8ccba3..112d2f38e0de 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
623 page = dev->db_tab->page + end; 623 page = dev->db_tab->page + end;
624 624
625alloc: 625alloc:
626 page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 626 page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
627 &page->mapping, GFP_KERNEL); 627 MTHCA_ICM_PAGE_SIZE, &page->mapping,
628 GFP_KERNEL);
628 if (!page->db_rec) { 629 if (!page->db_rec) {
629 ret = -ENOMEM; 630 ret = -ENOMEM;
630 goto out; 631 goto out;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 82cb6b71ac7c..e3e9dd54caa2 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
534 { 534 {
535 struct mthca_ucontext *context; 535 struct mthca_ucontext *context;
536 536
537 qp = kmalloc(sizeof *qp, GFP_KERNEL); 537 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
538 if (!qp) 538 if (!qp)
539 return ERR_PTR(-ENOMEM); 539 return ERR_PTR(-ENOMEM);
540 540
@@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
600 if (udata) 600 if (udata)
601 return ERR_PTR(-EINVAL); 601 return ERR_PTR(-EINVAL);
602 602
603 qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); 603 qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
604 if (!qp) 604 if (!qp)
605 return ERR_PTR(-ENOMEM); 605 return ERR_PTR(-ENOMEM);
606 606
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 241a57a07485..097e5ab2a19f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
380 q->len = len; 380 q->len = len;
381 q->entry_size = entry_size; 381 q->entry_size = entry_size;
382 q->size = len * entry_size; 382 q->size = len * entry_size;
383 q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size, 383 q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
384 &q->dma, GFP_KERNEL); 384 GFP_KERNEL);
385 if (!q->va) 385 if (!q->va)
386 return -ENOMEM; 386 return -ENOMEM;
387 return 0; 387 return 0;
@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1819 return -ENOMEM; 1819 return -ENOMEM;
1820 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ, 1820 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
1821 OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); 1821 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1822 cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); 1822 cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
1823 if (!cq->va) { 1823 if (!cq->va) {
1824 status = -ENOMEM; 1824 status = -ENOMEM;
1825 goto mem_err; 1825 goto mem_err;
@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
2209 qp->sq.max_cnt = max_wqe_allocated; 2209 qp->sq.max_cnt = max_wqe_allocated;
2210 len = (hw_pages * hw_page_size); 2210 len = (hw_pages * hw_page_size);
2211 2211
2212 qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2212 qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2213 if (!qp->sq.va) 2213 if (!qp->sq.va)
2214 return -EINVAL; 2214 return -EINVAL;
2215 qp->sq.len = len; 2215 qp->sq.len = len;
@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
2259 qp->rq.max_cnt = max_rqe_allocated; 2259 qp->rq.max_cnt = max_rqe_allocated;
2260 len = (hw_pages * hw_page_size); 2260 len = (hw_pages * hw_page_size);
2261 2261
2262 qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2262 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2263 if (!qp->rq.va) 2263 if (!qp->rq.va)
2264 return -ENOMEM; 2264 return -ENOMEM;
2265 qp->rq.pa = pa; 2265 qp->rq.pa = pa;
@@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
2315 if (dev->attr.ird == 0) 2315 if (dev->attr.ird == 0)
2316 return 0; 2316 return 0;
2317 2317
2318 qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa, 2318 qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
2319 GFP_KERNEL); 2319 GFP_KERNEL);
2320 if (!qp->ird_q_va) 2320 if (!qp->ird_q_va)
2321 return -ENOMEM; 2321 return -ENOMEM;
2322 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages, 2322 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index dd15474b19b7..6be0ea109138 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
73 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), 73 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
74 sizeof(struct ocrdma_rdma_stats_resp)); 74 sizeof(struct ocrdma_rdma_stats_resp));
75 75
76 mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size, 76 mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
77 &mem->pa, GFP_KERNEL); 77 &mem->pa, GFP_KERNEL);
78 if (!mem->va) { 78 if (!mem->va) {
79 pr_err("%s: stats mbox allocation failed\n", __func__); 79 pr_err("%s: stats mbox allocation failed\n", __func__);
80 return false; 80 return false;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c46bed0c5513..287c332ff0e6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
504 INIT_LIST_HEAD(&ctx->mm_head); 504 INIT_LIST_HEAD(&ctx->mm_head);
505 mutex_init(&ctx->mm_list_lock); 505 mutex_init(&ctx->mm_list_lock);
506 506
507 ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len, 507 ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
508 &ctx->ah_tbl.pa, GFP_KERNEL); 508 &ctx->ah_tbl.pa, GFP_KERNEL);
509 if (!ctx->ah_tbl.va) { 509 if (!ctx->ah_tbl.va) {
510 kfree(ctx); 510 kfree(ctx);
511 return ERR_PTR(-ENOMEM); 511 return ERR_PTR(-ENOMEM);
@@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
838 return -ENOMEM; 838 return -ENOMEM;
839 839
840 for (i = 0; i < mr->num_pbls; i++) { 840 for (i = 0; i < mr->num_pbls; i++) {
841 va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); 841 va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
842 if (!va) { 842 if (!va) {
843 ocrdma_free_mr_pbl_tbl(dev, mr); 843 ocrdma_free_mr_pbl_tbl(dev, mr);
844 status = -ENOMEM; 844 status = -ENOMEM;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b342a70e2814..e1ccf32b1c3d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
556 return ERR_PTR(-ENOMEM); 556 return ERR_PTR(-ENOMEM);
557 557
558 for (i = 0; i < pbl_info->num_pbls; i++) { 558 for (i = 0; i < pbl_info->num_pbls; i++) {
559 va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size, 559 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
560 &pa, flags); 560 flags);
561 if (!va) 561 if (!va)
562 goto err; 562 goto err;
563 563
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 868da0ece7ba..445ea19a2ec8 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -512,7 +512,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
512 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 512 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
513 wc.ex.imm_data = ohdr->u.ud.imm_data; 513 wc.ex.imm_data = ohdr->u.ud.imm_data;
514 wc.wc_flags = IB_WC_WITH_IMM; 514 wc.wc_flags = IB_WC_WITH_IMM;
515 tlen -= sizeof(u32);
516 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 515 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
517 wc.ex.imm_data = 0; 516 wc.ex.imm_data = 0;
518 wc.wc_flags = 0; 517 wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 42b8685c997e..3c633ab58052 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
427 427
428static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) 428static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
429{ 429{
430 return (enum pvrdma_wr_opcode)op; 430 switch (op) {
431 case IB_WR_RDMA_WRITE:
432 return PVRDMA_WR_RDMA_WRITE;
433 case IB_WR_RDMA_WRITE_WITH_IMM:
434 return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
435 case IB_WR_SEND:
436 return PVRDMA_WR_SEND;
437 case IB_WR_SEND_WITH_IMM:
438 return PVRDMA_WR_SEND_WITH_IMM;
439 case IB_WR_RDMA_READ:
440 return PVRDMA_WR_RDMA_READ;
441 case IB_WR_ATOMIC_CMP_AND_SWP:
442 return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
443 case IB_WR_ATOMIC_FETCH_AND_ADD:
444 return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
445 case IB_WR_LSO:
446 return PVRDMA_WR_LSO;
447 case IB_WR_SEND_WITH_INV:
448 return PVRDMA_WR_SEND_WITH_INV;
449 case IB_WR_RDMA_READ_WITH_INV:
450 return PVRDMA_WR_RDMA_READ_WITH_INV;
451 case IB_WR_LOCAL_INV:
452 return PVRDMA_WR_LOCAL_INV;
453 case IB_WR_REG_MR:
454 return PVRDMA_WR_FAST_REG_MR;
455 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
456 return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
457 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
458 return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
459 case IB_WR_REG_SIG_MR:
460 return PVRDMA_WR_REG_SIG_MR;
461 default:
462 return PVRDMA_WR_ERROR;
463 }
431} 464}
432 465
433static inline enum ib_wc_status pvrdma_wc_status_to_ib( 466static inline enum ib_wc_status pvrdma_wc_status_to_ib(
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index eaa109dbc96a..39c37b6fd715 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -890,8 +890,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
890 dev_info(&pdev->dev, "device version %d, driver version %d\n", 890 dev_info(&pdev->dev, "device version %d, driver version %d\n",
891 dev->dsr_version, PVRDMA_VERSION); 891 dev->dsr_version, PVRDMA_VERSION);
892 892
893 dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr), 893 dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
894 &dev->dsrbase, GFP_KERNEL); 894 &dev->dsrbase, GFP_KERNEL);
895 if (!dev->dsr) { 895 if (!dev->dsr) {
896 dev_err(&pdev->dev, "failed to allocate shared region\n"); 896 dev_err(&pdev->dev, "failed to allocate shared region\n");
897 ret = -ENOMEM; 897 ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 3acf74cbe266..1ec3646087ba 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
721 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 721 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
722 wqe_hdr->ex.imm_data = wr->ex.imm_data; 722 wqe_hdr->ex.imm_data = wr->ex.imm_data;
723 723
724 if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
725 *bad_wr = wr;
726 ret = -EINVAL;
727 goto out;
728 }
729
724 switch (qp->ibqp.qp_type) { 730 switch (qp->ibqp.qp_type) {
725 case IB_QPT_GSI: 731 case IB_QPT_GSI:
726 case IB_QPT_UD: 732 case IB_QPT_UD:
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index a1bd8cfc2c25..c6cc3e4ab71d 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2910,6 +2910,8 @@ send:
2910 goto op_err; 2910 goto op_err;
2911 if (!ret) 2911 if (!ret)
2912 goto rnr_nak; 2912 goto rnr_nak;
2913 if (wqe->length > qp->r_len)
2914 goto inv_err;
2913 break; 2915 break;
2914 2916
2915 case IB_WR_RDMA_WRITE_WITH_IMM: 2917 case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -3078,7 +3080,10 @@ op_err:
3078 goto err; 3080 goto err;
3079 3081
3080inv_err: 3082inv_err:
3081 send_status = IB_WC_REM_INV_REQ_ERR; 3083 send_status =
3084 sqp->ibqp.qp_type == IB_QPT_RC ?
3085 IB_WC_REM_INV_REQ_ERR :
3086 IB_WC_SUCCESS;
3082 wc.status = IB_WC_LOC_QP_OP_ERR; 3087 wc.status = IB_WC_LOC_QP_OP_ERR;
3083 goto err; 3088 goto err;
3084 3089
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1da119d901a9..73e808c1e6ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
248 struct list_head list; 248 struct list_head list;
249 struct net_device *dev; 249 struct net_device *dev;
250 struct ipoib_neigh *neigh; 250 struct ipoib_neigh *neigh;
251 struct ipoib_path *path;
252 struct ipoib_tx_buf *tx_ring; 251 struct ipoib_tx_buf *tx_ring;
253 unsigned int tx_head; 252 unsigned int tx_head;
254 unsigned int tx_tail; 253 unsigned int tx_tail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0428e01e8f69..aa9dcfc36cd3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
1312 1312
1313 neigh->cm = tx; 1313 neigh->cm = tx;
1314 tx->neigh = neigh; 1314 tx->neigh = neigh;
1315 tx->path = path;
1316 tx->dev = dev; 1315 tx->dev = dev;
1317 list_add(&tx->list, &priv->cm.start_list); 1316 list_add(&tx->list, &priv->cm.start_list);
1318 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); 1317 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1371 neigh->daddr + QPN_AND_OPTIONS_OFFSET); 1370 neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1372 goto free_neigh; 1371 goto free_neigh;
1373 } 1372 }
1374 memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec)); 1373 memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
1375 1374
1376 spin_unlock_irqrestore(&priv->lock, flags); 1375 spin_unlock_irqrestore(&priv->lock, flags);
1377 netif_tx_unlock_bh(dev); 1376 netif_tx_unlock_bh(dev);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 31d91538bbf4..694324b37480 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
3032{ 3032{
3033 struct srp_target_port *target = host_to_target(scmnd->device->host); 3033 struct srp_target_port *target = host_to_target(scmnd->device->host);
3034 struct srp_rdma_ch *ch; 3034 struct srp_rdma_ch *ch;
3035 int i, j;
3036 u8 status; 3035 u8 status;
3037 3036
3038 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 3037 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
3044 if (status) 3043 if (status)
3045 return FAILED; 3044 return FAILED;
3046 3045
3047 for (i = 0; i < target->ch_count; i++) {
3048 ch = &target->ch[i];
3049 for (j = 0; j < target->req_ring_size; ++j) {
3050 struct srp_request *req = &ch->req_ring[j];
3051
3052 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
3053 }
3054 }
3055
3056 return SUCCESS; 3046 return SUCCESS;
3057} 3047}
3058 3048
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cfc8b94527b9..aa4e431cbcd3 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -252,6 +252,8 @@ static const struct xpad_device {
252 { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, 252 { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
253 { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, 253 { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
254 { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, 254 { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
255 { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
256 { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
255 { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, 257 { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
256 { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, 258 { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
257 { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, 259 { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
428 XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ 430 XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
429 XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ 431 XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
430 XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ 432 XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
433 XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
431 XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ 434 XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
432 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ 435 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
433 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 436 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4713957b0cbb..a878351f1643 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
420 420
421config KEYBOARD_SNVS_PWRKEY 421config KEYBOARD_SNVS_PWRKEY
422 tristate "IMX SNVS Power Key Driver" 422 tristate "IMX SNVS Power Key Driver"
423 depends on SOC_IMX6SX 423 depends on SOC_IMX6SX || SOC_IMX7D
424 depends on OF 424 depends on OF
425 help 425 help
426 This is the snvs powerkey driver for the Freescale i.MX application 426 This is the snvs powerkey driver for the Freescale i.MX application
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 312916f99597..73686c2460ce 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -75,9 +75,7 @@
75struct cap11xx_led { 75struct cap11xx_led {
76 struct cap11xx_priv *priv; 76 struct cap11xx_priv *priv;
77 struct led_classdev cdev; 77 struct led_classdev cdev;
78 struct work_struct work;
79 u32 reg; 78 u32 reg;
80 enum led_brightness new_brightness;
81}; 79};
82#endif 80#endif
83 81
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev)
233} 231}
234 232
235#ifdef CONFIG_LEDS_CLASS 233#ifdef CONFIG_LEDS_CLASS
236static void cap11xx_led_work(struct work_struct *work) 234static int cap11xx_led_set(struct led_classdev *cdev,
235 enum led_brightness value)
237{ 236{
238 struct cap11xx_led *led = container_of(work, struct cap11xx_led, work); 237 struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
239 struct cap11xx_priv *priv = led->priv; 238 struct cap11xx_priv *priv = led->priv;
240 int value = led->new_brightness;
241 239
242 /* 240 /*
243 * All LEDs share the same duty cycle as this is a HW limitation. 241 * All LEDs share the same duty cycle as this is a HW
244 * Brightness levels per LED are either 0 (OFF) and 1 (ON). 242 * limitation. Brightness levels per LED are either
243 * 0 (OFF) and 1 (ON).
245 */ 244 */
246 regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL, 245 return regmap_update_bits(priv->regmap,
247 BIT(led->reg), value ? BIT(led->reg) : 0); 246 CAP11XX_REG_LED_OUTPUT_CONTROL,
248} 247 BIT(led->reg),
249 248 value ? BIT(led->reg) : 0);
250static void cap11xx_led_set(struct led_classdev *cdev,
251 enum led_brightness value)
252{
253 struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
254
255 if (led->new_brightness == value)
256 return;
257
258 led->new_brightness = value;
259 schedule_work(&led->work);
260} 249}
261 250
262static int cap11xx_init_leds(struct device *dev, 251static int cap11xx_init_leds(struct device *dev,
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev,
299 led->cdev.default_trigger = 288 led->cdev.default_trigger =
300 of_get_property(child, "linux,default-trigger", NULL); 289 of_get_property(child, "linux,default-trigger", NULL);
301 led->cdev.flags = 0; 290 led->cdev.flags = 0;
302 led->cdev.brightness_set = cap11xx_led_set; 291 led->cdev.brightness_set_blocking = cap11xx_led_set;
303 led->cdev.max_brightness = 1; 292 led->cdev.max_brightness = 1;
304 led->cdev.brightness = LED_OFF; 293 led->cdev.brightness = LED_OFF;
305 294
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev,
312 led->reg = reg; 301 led->reg = reg;
313 led->priv = priv; 302 led->priv = priv;
314 303
315 INIT_WORK(&led->work, cap11xx_led_work);
316
317 error = devm_led_classdev_register(dev, &led->cdev); 304 error = devm_led_classdev_register(dev, &led->cdev);
318 if (error) { 305 if (error) {
319 of_node_put(child); 306 of_node_put(child);
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 403452ef00e6..3d1cb7bf5e35 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
222 keypad->stopped = true; 222 keypad->stopped = true;
223 spin_unlock_irq(&keypad->lock); 223 spin_unlock_irq(&keypad->lock);
224 224
225 flush_work(&keypad->work.work); 225 flush_delayed_work(&keypad->work);
226 /* 226 /*
227 * matrix_keypad_scan() will leave IRQs enabled; 227 * matrix_keypad_scan() will leave IRQs enabled;
228 * we should disable them now. 228 * we should disable them now.
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 43b86482dda0..d466bc07aebb 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = {
58struct qt2160_led { 58struct qt2160_led {
59 struct qt2160_data *qt2160; 59 struct qt2160_data *qt2160;
60 struct led_classdev cdev; 60 struct led_classdev cdev;
61 struct work_struct work;
62 char name[32]; 61 char name[32];
63 int id; 62 int id;
64 enum led_brightness new_brightness; 63 enum led_brightness brightness;
65}; 64};
66#endif 65#endif
67 66
@@ -74,7 +73,6 @@ struct qt2160_data {
74 u16 key_matrix; 73 u16 key_matrix;
75#ifdef CONFIG_LEDS_CLASS 74#ifdef CONFIG_LEDS_CLASS
76 struct qt2160_led leds[QT2160_NUM_LEDS_X]; 75 struct qt2160_led leds[QT2160_NUM_LEDS_X];
77 struct mutex led_lock;
78#endif 76#endif
79}; 77};
80 78
@@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
83 81
84#ifdef CONFIG_LEDS_CLASS 82#ifdef CONFIG_LEDS_CLASS
85 83
86static void qt2160_led_work(struct work_struct *work) 84static int qt2160_led_set(struct led_classdev *cdev,
85 enum led_brightness value)
87{ 86{
88 struct qt2160_led *led = container_of(work, struct qt2160_led, work); 87 struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
89 struct qt2160_data *qt2160 = led->qt2160; 88 struct qt2160_data *qt2160 = led->qt2160;
90 struct i2c_client *client = qt2160->client; 89 struct i2c_client *client = qt2160->client;
91 int value = led->new_brightness;
92 u32 drive, pwmen; 90 u32 drive, pwmen;
93 91
94 mutex_lock(&qt2160->led_lock); 92 if (value != led->brightness) {
95 93 drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
96 drive = qt2160_read(client, QT2160_CMD_DRIVE_X); 94 pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
97 pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); 95 if (value != LED_OFF) {
98 if (value != LED_OFF) { 96 drive |= BIT(led->id);
99 drive |= (1 << led->id); 97 pwmen |= BIT(led->id);
100 pwmen |= (1 << led->id);
101
102 } else {
103 drive &= ~(1 << led->id);
104 pwmen &= ~(1 << led->id);
105 }
106 qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
107 qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
108 98
109 /* 99 } else {
110 * Changing this register will change the brightness 100 drive &= ~BIT(led->id);
111 * of every LED in the qt2160. It's a HW limitation. 101 pwmen &= ~BIT(led->id);
112 */ 102 }
113 if (value != LED_OFF) 103 qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
114 qt2160_write(client, QT2160_CMD_PWM_DUTY, value); 104 qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
115 105
116 mutex_unlock(&qt2160->led_lock); 106 /*
117} 107 * Changing this register will change the brightness
108 * of every LED in the qt2160. It's a HW limitation.
109 */
110 if (value != LED_OFF)
111 qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
118 112
119static void qt2160_led_set(struct led_classdev *cdev, 113 led->brightness = value;
120 enum led_brightness value) 114 }
121{
122 struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
123 115
124 led->new_brightness = value; 116 return 0;
125 schedule_work(&led->work);
126} 117}
127 118
128#endif /* CONFIG_LEDS_CLASS */ 119#endif /* CONFIG_LEDS_CLASS */
@@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
293 int ret; 284 int ret;
294 int i; 285 int i;
295 286
296 mutex_init(&qt2160->led_lock);
297
298 for (i = 0; i < QT2160_NUM_LEDS_X; i++) { 287 for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
299 struct qt2160_led *led = &qt2160->leds[i]; 288 struct qt2160_led *led = &qt2160->leds[i];
300 289
301 snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); 290 snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
302 led->cdev.name = led->name; 291 led->cdev.name = led->name;
303 led->cdev.brightness_set = qt2160_led_set; 292 led->cdev.brightness_set_blocking = qt2160_led_set;
304 led->cdev.brightness = LED_OFF; 293 led->cdev.brightness = LED_OFF;
305 led->id = i; 294 led->id = i;
306 led->qt2160 = qt2160; 295 led->qt2160 = qt2160;
307 296
308 INIT_WORK(&led->work, qt2160_led_work);
309
310 ret = led_classdev_register(&client->dev, &led->cdev); 297 ret = led_classdev_register(&client->dev, &led->cdev);
311 if (ret < 0) 298 if (ret < 0)
312 return ret; 299 return ret;
@@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160)
324{ 311{
325 int i; 312 int i;
326 313
327 for (i = 0; i < QT2160_NUM_LEDS_X; i++) { 314 for (i = 0; i < QT2160_NUM_LEDS_X; i++)
328 led_classdev_unregister(&qt2160->leds[i].cdev); 315 led_classdev_unregister(&qt2160->leds[i].cdev);
329 cancel_work_sync(&qt2160->leds[i].work);
330 }
331} 316}
332 317
333#else 318#else
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index babcfb165e4f..3b85631fde91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
153 153
154 input_dev->id.bustype = BUS_HOST; 154 input_dev->id.bustype = BUS_HOST;
155 155
156 keypad_data->input_dev = input_dev;
157
156 error = keypad_matrix_key_parse_dt(keypad_data); 158 error = keypad_matrix_key_parse_dt(keypad_data);
157 if (error) 159 if (error)
158 return error; 160 return error;
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
168 170
169 input_set_drvdata(input_dev, keypad_data); 171 input_set_drvdata(input_dev, keypad_data);
170 172
171 keypad_data->input_dev = input_dev;
172
173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
174 keypad_data->base = devm_ioremap_resource(&pdev->dev, res); 174 keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
175 if (IS_ERR(keypad_data->base)) 175 if (IS_ERR(keypad_data->base))
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index 094bddf56755..c1e66f45d552 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -22,7 +22,6 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/input-polldev.h> 23#include <linux/input-polldev.h>
24#include <linux/i2c.h> 24#include <linux/i2c.h>
25#include <linux/workqueue.h>
26#include <linux/leds.h> 25#include <linux/leds.h>
27 26
28#define APANEL_NAME "Fujitsu Application Panel" 27#define APANEL_NAME "Fujitsu Application Panel"
@@ -59,8 +58,6 @@ struct apanel {
59 struct i2c_client *client; 58 struct i2c_client *client;
60 unsigned short keymap[MAX_PANEL_KEYS]; 59 unsigned short keymap[MAX_PANEL_KEYS];
61 u16 nkeys; 60 u16 nkeys;
62 u16 led_bits;
63 struct work_struct led_work;
64 struct led_classdev mail_led; 61 struct led_classdev mail_led;
65}; 62};
66 63
@@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev)
109 report_key(idev, ap->keymap[i]); 106 report_key(idev, ap->keymap[i]);
110} 107}
111 108
112/* Track state changes of LED */ 109static int mail_led_set(struct led_classdev *led,
113static void led_update(struct work_struct *work)
114{
115 struct apanel *ap = container_of(work, struct apanel, led_work);
116
117 i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits);
118}
119
120static void mail_led_set(struct led_classdev *led,
121 enum led_brightness value) 110 enum led_brightness value)
122{ 111{
123 struct apanel *ap = container_of(led, struct apanel, mail_led); 112 struct apanel *ap = container_of(led, struct apanel, mail_led);
113 u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000;
124 114
125 if (value != LED_OFF) 115 return i2c_smbus_write_word_data(ap->client, 0x10, led_bits);
126 ap->led_bits |= 0x8000;
127 else
128 ap->led_bits &= ~0x8000;
129
130 schedule_work(&ap->led_work);
131} 116}
132 117
133static int apanel_remove(struct i2c_client *client) 118static int apanel_remove(struct i2c_client *client)
@@ -179,7 +164,7 @@ static struct apanel apanel = {
179 }, 164 },
180 .mail_led = { 165 .mail_led = {
181 .name = "mail:blue", 166 .name = "mail:blue",
182 .brightness_set = mail_led_set, 167 .brightness_set_blocking = mail_led_set,
183 }, 168 },
184}; 169};
185 170
@@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client,
235 if (err) 220 if (err)
236 goto out3; 221 goto out3;
237 222
238 INIT_WORK(&ap->led_work, led_update);
239 if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { 223 if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
240 err = led_classdev_register(&client->dev, &ap->mail_led); 224 err = led_classdev_register(&client->dev, &ap->mail_led);
241 if (err) 225 if (err)
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 1efcfdf9f8a8..dd9dd4e40827 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
481 idev->close = bma150_irq_close; 481 idev->close = bma150_irq_close;
482 input_set_drvdata(idev, bma150); 482 input_set_drvdata(idev, bma150);
483 483
484 bma150->input = idev;
485
484 error = input_register_device(idev); 486 error = input_register_device(idev);
485 if (error) { 487 if (error) {
486 input_free_device(idev); 488 input_free_device(idev);
487 return error; 489 return error;
488 } 490 }
489 491
490 bma150->input = idev;
491 return 0; 492 return 0;
492} 493}
493 494
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
510 511
511 bma150_init_input_device(bma150, ipoll_dev->input); 512 bma150_init_input_device(bma150, ipoll_dev->input);
512 513
514 bma150->input_polled = ipoll_dev;
515 bma150->input = ipoll_dev->input;
516
513 error = input_register_polled_device(ipoll_dev); 517 error = input_register_polled_device(ipoll_dev);
514 if (error) { 518 if (error) {
515 input_free_polled_device(ipoll_dev); 519 input_free_polled_device(ipoll_dev);
516 return error; 520 return error;
517 } 521 }
518 522
519 bma150->input_polled = ipoll_dev;
520 bma150->input = ipoll_dev->input;
521
522 return 0; 523 return 0;
523} 524}
524 525
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index 55da191ae550..dbb6d9e1b947 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -34,6 +34,7 @@ struct pwm_vibrator {
34 struct work_struct play_work; 34 struct work_struct play_work;
35 u16 level; 35 u16 level;
36 u32 direction_duty_cycle; 36 u32 direction_duty_cycle;
37 bool vcc_on;
37}; 38};
38 39
39static int pwm_vibrator_start(struct pwm_vibrator *vibrator) 40static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
42 struct pwm_state state; 43 struct pwm_state state;
43 int err; 44 int err;
44 45
45 err = regulator_enable(vibrator->vcc); 46 if (!vibrator->vcc_on) {
46 if (err) { 47 err = regulator_enable(vibrator->vcc);
47 dev_err(pdev, "failed to enable regulator: %d", err); 48 if (err) {
48 return err; 49 dev_err(pdev, "failed to enable regulator: %d", err);
50 return err;
51 }
52 vibrator->vcc_on = true;
49 } 53 }
50 54
51 pwm_get_state(vibrator->pwm, &state); 55 pwm_get_state(vibrator->pwm, &state);
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
76 80
77static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) 81static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
78{ 82{
79 regulator_disable(vibrator->vcc);
80
81 if (vibrator->pwm_dir) 83 if (vibrator->pwm_dir)
82 pwm_disable(vibrator->pwm_dir); 84 pwm_disable(vibrator->pwm_dir);
83 pwm_disable(vibrator->pwm); 85 pwm_disable(vibrator->pwm);
86
87 if (vibrator->vcc_on) {
88 regulator_disable(vibrator->vcc);
89 vibrator->vcc_on = false;
90 }
84} 91}
85 92
86static void pwm_vibrator_play_work(struct work_struct *work) 93static void pwm_vibrator_play_work(struct work_struct *work)
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 8ec483e8688b..26ec603fe220 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -39,6 +39,7 @@
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/fs.h> 40#include <linux/fs.h>
41#include <linux/miscdevice.h> 41#include <linux/miscdevice.h>
42#include <linux/overflow.h>
42#include <linux/input/mt.h> 43#include <linux/input/mt.h>
43#include "../input-compat.h" 44#include "../input-compat.h"
44 45
@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
405static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, 406static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
406 const struct input_absinfo *abs) 407 const struct input_absinfo *abs)
407{ 408{
408 int min, max; 409 int min, max, range;
409 410
410 min = abs->minimum; 411 min = abs->minimum;
411 max = abs->maximum; 412 max = abs->maximum;
@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
417 return -EINVAL; 418 return -EINVAL;
418 } 419 }
419 420
420 if (abs->flat > max - min) { 421 if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
421 printk(KERN_DEBUG 422 printk(KERN_DEBUG
422 "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n", 423 "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
423 UINPUT_NAME, code, abs->flat, min, max); 424 UINPUT_NAME, code, abs->flat, min, max);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index f322a1768fbb..225ae6980182 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
1336static const struct acpi_device_id elan_acpi_id[] = { 1336static const struct acpi_device_id elan_acpi_id[] = {
1337 { "ELAN0000", 0 }, 1337 { "ELAN0000", 0 },
1338 { "ELAN0100", 0 }, 1338 { "ELAN0100", 0 },
1339 { "ELAN0501", 0 },
1340 { "ELAN0600", 0 }, 1339 { "ELAN0600", 0 },
1341 { "ELAN0602", 0 }, 1340 { "ELAN0602", 0 },
1342 { "ELAN0605", 0 }, 1341 { "ELAN0605", 0 },
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1346 { "ELAN060C", 0 }, 1345 { "ELAN060C", 0 },
1347 { "ELAN0611", 0 }, 1346 { "ELAN0611", 0 },
1348 { "ELAN0612", 0 }, 1347 { "ELAN0612", 0 },
1348 { "ELAN0617", 0 },
1349 { "ELAN0618", 0 }, 1349 { "ELAN0618", 0 },
1350 { "ELAN061C", 0 }, 1350 { "ELAN061C", 0 },
1351 { "ELAN061D", 0 }, 1351 { "ELAN061D", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 9fe075c137dc..a7f8b1614559 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1119 * Asus UX31 0x361f00 20, 15, 0e clickpad 1119 * Asus UX31 0x361f00 20, 15, 0e clickpad
1120 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1120 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1121 * Avatar AVIU-145A2 0x361f00 ? clickpad 1121 * Avatar AVIU-145A2 0x361f00 ? clickpad
1122 * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
1123 * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
1122 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1124 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1123 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons 1125 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
1124 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons 1126 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
1171 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), 1173 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
1172 }, 1174 },
1173 }, 1175 },
1176 {
1177 /* Fujitsu H780 also has a middle button */
1178 .matches = {
1179 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1180 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
1181 },
1182 },
1174#endif 1183#endif
1175 { } 1184 { }
1176}; 1185};
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index b36084710f69..a7cfab3db9ee 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -23,7 +23,6 @@
23#include <linux/of.h> 23#include <linux/of.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/clk.h>
27 26
28/* 27/*
29 * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller. 28 * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller.
@@ -75,7 +74,6 @@ struct olpc_apsp {
75 struct serio *kbio; 74 struct serio *kbio;
76 struct serio *padio; 75 struct serio *padio;
77 void __iomem *base; 76 void __iomem *base;
78 struct clk *clk;
79 int open_count; 77 int open_count;
80 int irq; 78 int irq;
81}; 79};
@@ -148,17 +146,11 @@ static int olpc_apsp_open(struct serio *port)
148 struct olpc_apsp *priv = port->port_data; 146 struct olpc_apsp *priv = port->port_data;
149 unsigned int tmp; 147 unsigned int tmp;
150 unsigned long l; 148 unsigned long l;
151 int error;
152 149
153 if (priv->open_count++ == 0) { 150 if (priv->open_count++ == 0) {
154 error = clk_prepare_enable(priv->clk);
155 if (error)
156 return error;
157
158 l = readl(priv->base + COMMAND_FIFO_STATUS); 151 l = readl(priv->base + COMMAND_FIFO_STATUS);
159 if (!(l & CMD_STS_MASK)) { 152 if (!(l & CMD_STS_MASK)) {
160 dev_err(priv->dev, "SP cannot accept commands.\n"); 153 dev_err(priv->dev, "SP cannot accept commands.\n");
161 clk_disable_unprepare(priv->clk);
162 return -EIO; 154 return -EIO;
163 } 155 }
164 156
@@ -179,8 +171,6 @@ static void olpc_apsp_close(struct serio *port)
179 /* Disable interrupt 0 */ 171 /* Disable interrupt 0 */
180 tmp = readl(priv->base + PJ_INTERRUPT_MASK); 172 tmp = readl(priv->base + PJ_INTERRUPT_MASK);
181 writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK); 173 writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK);
182
183 clk_disable_unprepare(priv->clk);
184 } 174 }
185} 175}
186 176
@@ -195,6 +185,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
195 if (!priv) 185 if (!priv)
196 return -ENOMEM; 186 return -ENOMEM;
197 187
188 priv->dev = &pdev->dev;
189
198 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 190 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
199 priv->base = devm_ioremap_resource(&pdev->dev, res); 191 priv->base = devm_ioremap_resource(&pdev->dev, res);
200 if (IS_ERR(priv->base)) { 192 if (IS_ERR(priv->base)) {
@@ -206,10 +198,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
206 if (priv->irq < 0) 198 if (priv->irq < 0)
207 return priv->irq; 199 return priv->irq;
208 200
209 priv->clk = devm_clk_get(&pdev->dev, "sp");
210 if (IS_ERR(priv->clk))
211 return PTR_ERR(priv->clk);
212
213 /* KEYBOARD */ 201 /* KEYBOARD */
214 kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL); 202 kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
215 if (!kb_serio) 203 if (!kb_serio)
@@ -248,7 +236,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
248 goto err_irq; 236 goto err_irq;
249 } 237 }
250 238
251 priv->dev = &pdev->dev;
252 device_init_wakeup(priv->dev, 1); 239 device_init_wakeup(priv->dev, 1);
253 platform_set_drvdata(pdev, priv); 240 platform_set_drvdata(pdev, priv);
254 241
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index c62cceb97bb1..5e8d8384aa2a 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio)
76{ 76{
77 struct ps2_gpio_data *drvdata = serio->port_data; 77 struct ps2_gpio_data *drvdata = serio->port_data;
78 78
79 flush_delayed_work(&drvdata->tx_work);
79 disable_irq(drvdata->irq); 80 disable_irq(drvdata->irq);
80} 81}
81 82
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index af6027cc7bbf..068dbbc610fc 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -698,7 +698,7 @@ config TOUCHSCREEN_EDT_FT5X06
698 698
699config TOUCHSCREEN_RASPBERRYPI_FW 699config TOUCHSCREEN_RASPBERRYPI_FW
700 tristate "Raspberry Pi's firmware base touch screen support" 700 tristate "Raspberry Pi's firmware base touch screen support"
701 depends on RASPBERRYPI_FIRMWARE || COMPILE_TEST 701 depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
702 help 702 help
703 Say Y here if you have the official Raspberry Pi 7 inch screen on 703 Say Y here if you have the official Raspberry Pi 7 inch screen on
704 your system. 704 your system.
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index f456c1125bd6..69881265d121 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -147,8 +147,8 @@ static int rpi_ts_probe(struct platform_device *pdev)
147 return -ENOMEM; 147 return -ENOMEM;
148 ts->pdev = pdev; 148 ts->pdev = pdev;
149 149
150 ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, 150 ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
151 GFP_KERNEL); 151 GFP_KERNEL);
152 if (!ts->fw_regs_va) { 152 if (!ts->fw_regs_va) {
153 dev_err(dev, "failed to dma_alloc_coherent\n"); 153 dev_err(dev, "failed to dma_alloc_coherent\n");
154 return -ENOMEM; 154 return -ENOMEM;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 87ba23a75b38..2a7b78bb98b4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
1991 1991
1992static void do_detach(struct iommu_dev_data *dev_data) 1992static void do_detach(struct iommu_dev_data *dev_data)
1993{ 1993{
1994 struct protection_domain *domain = dev_data->domain;
1994 struct amd_iommu *iommu; 1995 struct amd_iommu *iommu;
1995 u16 alias; 1996 u16 alias;
1996 1997
1997 iommu = amd_iommu_rlookup_table[dev_data->devid]; 1998 iommu = amd_iommu_rlookup_table[dev_data->devid];
1998 alias = dev_data->alias; 1999 alias = dev_data->alias;
1999 2000
2000 /* decrease reference counters */
2001 dev_data->domain->dev_iommu[iommu->index] -= 1;
2002 dev_data->domain->dev_cnt -= 1;
2003
2004 /* Update data structures */ 2001 /* Update data structures */
2005 dev_data->domain = NULL; 2002 dev_data->domain = NULL;
2006 list_del(&dev_data->list); 2003 list_del(&dev_data->list);
@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
2010 2007
2011 /* Flush the DTE entry */ 2008 /* Flush the DTE entry */
2012 device_flush_dte(dev_data); 2009 device_flush_dte(dev_data);
2010
2011 /* Flush IOTLB */
2012 domain_flush_tlb_pde(domain);
2013
2014 /* Wait for the flushes to finish */
2015 domain_flush_complete(domain);
2016
2017 /* decrease reference counters - needs to happen after the flushes */
2018 domain->dev_iommu[iommu->index] -= 1;
2019 domain->dev_cnt -= 1;
2013} 2020}
2014 2021
2015/* 2022/*
@@ -2617,13 +2624,13 @@ out_unmap:
2617 bus_addr = address + s->dma_address + (j << PAGE_SHIFT); 2624 bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
2618 iommu_unmap_page(domain, bus_addr, PAGE_SIZE); 2625 iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
2619 2626
2620 if (--mapped_pages) 2627 if (--mapped_pages == 0)
2621 goto out_free_iova; 2628 goto out_free_iova;
2622 } 2629 }
2623 } 2630 }
2624 2631
2625out_free_iova: 2632out_free_iova:
2626 free_iova_fast(&dma_dom->iovad, address, npages); 2633 free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
2627 2634
2628out_err: 2635out_err:
2629 return 0; 2636 return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2bd9ac285c0d..78188bf7e90d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -363,7 +363,7 @@ static int dmar_map_gfx = 1;
363static int dmar_forcedac; 363static int dmar_forcedac;
364static int intel_iommu_strict; 364static int intel_iommu_strict;
365static int intel_iommu_superpage = 1; 365static int intel_iommu_superpage = 1;
366static int intel_iommu_sm = 1; 366static int intel_iommu_sm;
367static int iommu_identity_mapping; 367static int iommu_identity_mapping;
368 368
369#define IDENTMAP_ALL 1 369#define IDENTMAP_ALL 1
@@ -456,9 +456,9 @@ static int __init intel_iommu_setup(char *str)
456 } else if (!strncmp(str, "sp_off", 6)) { 456 } else if (!strncmp(str, "sp_off", 6)) {
457 pr_info("Disable supported super page\n"); 457 pr_info("Disable supported super page\n");
458 intel_iommu_superpage = 0; 458 intel_iommu_superpage = 0;
459 } else if (!strncmp(str, "sm_off", 6)) { 459 } else if (!strncmp(str, "sm_on", 5)) {
460 pr_info("Intel-IOMMU: disable scalable mode support\n"); 460 pr_info("Intel-IOMMU: scalable mode supported\n");
461 intel_iommu_sm = 0; 461 intel_iommu_sm = 1;
462 } else if (!strncmp(str, "tboot_noforce", 13)) { 462 } else if (!strncmp(str, "tboot_noforce", 13)) {
463 printk(KERN_INFO 463 printk(KERN_INFO
464 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); 464 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -5294,7 +5294,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
5294 struct iommu_resv_region *entry, *next; 5294 struct iommu_resv_region *entry, *next;
5295 5295
5296 list_for_each_entry_safe(entry, next, head, list) { 5296 list_for_each_entry_safe(entry, next, head, list) {
5297 if (entry->type == IOMMU_RESV_RESERVED) 5297 if (entry->type == IOMMU_RESV_MSI)
5298 kfree(entry); 5298 kfree(entry);
5299 } 5299 }
5300} 5300}
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 6ede4286b835..7e0df67bd3e9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -232,9 +232,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
232 232
233 spin_lock_init(&dom->pgtlock); 233 spin_lock_init(&dom->pgtlock);
234 234
235 dom->pgt_va = dma_zalloc_coherent(data->dev, 235 dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
236 M2701_IOMMU_PGT_SIZE, 236 &dom->pgt_pa, GFP_KERNEL);
237 &dom->pgt_pa, GFP_KERNEL);
238 if (!dom->pgt_va) 237 if (!dom->pgt_va)
239 return -ENOMEM; 238 return -ENOMEM;
240 239
@@ -442,6 +441,10 @@ static int mtk_iommu_add_device(struct device *dev)
442 iommu_spec.args_count = count; 441 iommu_spec.args_count = count;
443 442
444 mtk_iommu_create_mapping(dev, &iommu_spec); 443 mtk_iommu_create_mapping(dev, &iommu_spec);
444
445 /* dev->iommu_fwspec might have changed */
446 fwspec = dev_iommu_fwspec_get(dev);
447
445 of_node_put(iommu_spec.np); 448 of_node_put(iommu_spec.np);
446 } 449 }
447 450
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index d8947b28db2d..f04a6df65eb8 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -224,7 +224,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
224 * If we have reason to believe the IOMMU driver missed the initial 224 * If we have reason to believe the IOMMU driver missed the initial
225 * probe for dev, replay it to get things in order. 225 * probe for dev, replay it to get things in order.
226 */ 226 */
227 if (dev->bus && !device_iommu_mapped(dev)) 227 if (!err && dev->bus && !device_iommu_mapped(dev))
228 err = iommu_probe_device(dev); 228 err = iommu_probe_device(dev);
229 229
230 /* Ignore all other errors apart from EPROBE_DEFER */ 230 /* Ignore all other errors apart from EPROBE_DEFER */
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index 2543baba8b1f..5a2ec43b7ddd 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -95,7 +95,7 @@ static inline void setup_irq_channel(u32 magic, void __iomem *reg_addr)
95 95
96 /* Setup 64 channel slots */ 96 /* Setup 64 channel slots */
97 for (i = 0; i < INTC_IRQS; i += 4) 97 for (i = 0; i < INTC_IRQS; i += 4)
98 writel_relaxed(build_channel_val(i, magic), reg_addr + i); 98 writel(build_channel_val(i, magic), reg_addr + i);
99} 99}
100 100
101static int __init 101static int __init
@@ -135,16 +135,10 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent)
135static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq, 135static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
136 u32 irq_base) 136 u32 irq_base)
137{ 137{
138 u32 irq;
139
140 if (hwirq == 0) 138 if (hwirq == 0)
141 return 0; 139 return 0;
142 140
143 while (hwirq) { 141 handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs);
144 irq = __ffs(hwirq);
145 hwirq &= ~BIT(irq);
146 handle_domain_irq(root_domain, irq_base + irq, regs);
147 }
148 142
149 return 1; 143 return 1;
150} 144}
@@ -154,12 +148,16 @@ static void gx_irq_handler(struct pt_regs *regs)
154{ 148{
155 bool ret; 149 bool ret;
156 150
157 do { 151retry:
158 ret = handle_irq_perbit(regs, 152 ret = handle_irq_perbit(regs,
159 readl_relaxed(reg_base + GX_INTC_PEN31_00), 0); 153 readl(reg_base + GX_INTC_PEN63_32), 32);
160 ret |= handle_irq_perbit(regs, 154 if (ret)
161 readl_relaxed(reg_base + GX_INTC_PEN63_32), 32); 155 goto retry;
162 } while (ret); 156
157 ret = handle_irq_perbit(regs,
158 readl(reg_base + GX_INTC_PEN31_00), 0);
159 if (ret)
160 goto retry;
163} 161}
164 162
165static int __init 163static int __init
@@ -174,14 +172,14 @@ gx_intc_init(struct device_node *node, struct device_node *parent)
174 /* 172 /*
175 * Initial enable reg to disable all interrupts 173 * Initial enable reg to disable all interrupts
176 */ 174 */
177 writel_relaxed(0x0, reg_base + GX_INTC_NEN31_00); 175 writel(0x0, reg_base + GX_INTC_NEN31_00);
178 writel_relaxed(0x0, reg_base + GX_INTC_NEN63_32); 176 writel(0x0, reg_base + GX_INTC_NEN63_32);
179 177
180 /* 178 /*
181 * Initial mask reg with all unmasked, because we only use enalbe reg 179 * Initial mask reg with all unmasked, because we only use enalbe reg
182 */ 180 */
183 writel_relaxed(0x0, reg_base + GX_INTC_NMASK31_00); 181 writel(0x0, reg_base + GX_INTC_NMASK31_00);
184 writel_relaxed(0x0, reg_base + GX_INTC_NMASK63_32); 182 writel(0x0, reg_base + GX_INTC_NMASK63_32);
185 183
186 setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE); 184 setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE);
187 185
@@ -204,20 +202,29 @@ static void ck_irq_handler(struct pt_regs *regs)
204 void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00; 202 void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00;
205 void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32; 203 void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32;
206 204
207 do { 205retry:
208 /* handle 0 - 31 irqs */ 206 /* handle 0 - 63 irqs */
209 ret = handle_irq_perbit(regs, readl_relaxed(reg_pen_lo), 0); 207 ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32);
210 ret |= handle_irq_perbit(regs, readl_relaxed(reg_pen_hi), 32); 208 if (ret)
209 goto retry;
211 210
212 if (nr_irq == INTC_IRQS) 211 ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0);
213 continue; 212 if (ret)
213 goto retry;
214
215 if (nr_irq == INTC_IRQS)
216 return;
214 217
215 /* handle 64 - 127 irqs */ 218 /* handle 64 - 127 irqs */
216 ret |= handle_irq_perbit(regs, 219 ret = handle_irq_perbit(regs,
217 readl_relaxed(reg_pen_lo + CK_INTC_DUAL_BASE), 64); 220 readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
218 ret |= handle_irq_perbit(regs, 221 if (ret)
219 readl_relaxed(reg_pen_hi + CK_INTC_DUAL_BASE), 96); 222 goto retry;
220 } while (ret); 223
224 ret = handle_irq_perbit(regs,
225 readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
226 if (ret)
227 goto retry;
221} 228}
222 229
223static int __init 230static int __init
@@ -230,11 +237,11 @@ ck_intc_init(struct device_node *node, struct device_node *parent)
230 return ret; 237 return ret;
231 238
232 /* Initial enable reg to disable all interrupts */ 239 /* Initial enable reg to disable all interrupts */
233 writel_relaxed(0, reg_base + CK_INTC_NEN31_00); 240 writel(0, reg_base + CK_INTC_NEN31_00);
234 writel_relaxed(0, reg_base + CK_INTC_NEN63_32); 241 writel(0, reg_base + CK_INTC_NEN63_32);
235 242
236 /* Enable irq intc */ 243 /* Enable irq intc */
237 writel_relaxed(BIT(31), reg_base + CK_INTC_ICR); 244 writel(BIT(31), reg_base + CK_INTC_ICR);
238 245
239 ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0); 246 ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0);
240 ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32); 247 ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32);
@@ -260,8 +267,8 @@ ck_dual_intc_init(struct device_node *node, struct device_node *parent)
260 return ret; 267 return ret;
261 268
262 /* Initial enable reg to disable all interrupts */ 269 /* Initial enable reg to disable all interrupts */
263 writel_relaxed(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE); 270 writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
264 writel_relaxed(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE); 271 writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
265 272
266 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64); 273 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64);
267 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96); 274 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index db20e992a40f..c3aba3fc818d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -97,9 +97,14 @@ struct its_device;
97 * The ITS structure - contains most of the infrastructure, with the 97 * The ITS structure - contains most of the infrastructure, with the
98 * top-level MSI domain, the command queue, the collections, and the 98 * top-level MSI domain, the command queue, the collections, and the
99 * list of devices writing to it. 99 * list of devices writing to it.
100 *
101 * dev_alloc_lock has to be taken for device allocations, while the
102 * spinlock must be taken to parse data structures such as the device
103 * list.
100 */ 104 */
101struct its_node { 105struct its_node {
102 raw_spinlock_t lock; 106 raw_spinlock_t lock;
107 struct mutex dev_alloc_lock;
103 struct list_head entry; 108 struct list_head entry;
104 void __iomem *base; 109 void __iomem *base;
105 phys_addr_t phys_base; 110 phys_addr_t phys_base;
@@ -156,6 +161,7 @@ struct its_device {
156 void *itt; 161 void *itt;
157 u32 nr_ites; 162 u32 nr_ites;
158 u32 device_id; 163 u32 device_id;
164 bool shared;
159}; 165};
160 166
161static struct { 167static struct {
@@ -1580,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1580 nr_irqs /= 2; 1586 nr_irqs /= 2;
1581 } while (nr_irqs > 0); 1587 } while (nr_irqs > 0);
1582 1588
1589 if (!nr_irqs)
1590 err = -ENOSPC;
1591
1583 if (err) 1592 if (err)
1584 goto out; 1593 goto out;
1585 1594
@@ -2059,6 +2068,29 @@ static int __init allocate_lpi_tables(void)
2059 return 0; 2068 return 0;
2060} 2069}
2061 2070
2071static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
2072{
2073 u32 count = 1000000; /* 1s! */
2074 bool clean;
2075 u64 val;
2076
2077 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2078 val &= ~GICR_VPENDBASER_Valid;
2079 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2080
2081 do {
2082 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2083 clean = !(val & GICR_VPENDBASER_Dirty);
2084 if (!clean) {
2085 count--;
2086 cpu_relax();
2087 udelay(1);
2088 }
2089 } while (!clean && count);
2090
2091 return val;
2092}
2093
2062static void its_cpu_init_lpis(void) 2094static void its_cpu_init_lpis(void)
2063{ 2095{
2064 void __iomem *rbase = gic_data_rdist_rd_base(); 2096 void __iomem *rbase = gic_data_rdist_rd_base();
@@ -2144,6 +2176,30 @@ static void its_cpu_init_lpis(void)
2144 val |= GICR_CTLR_ENABLE_LPIS; 2176 val |= GICR_CTLR_ENABLE_LPIS;
2145 writel_relaxed(val, rbase + GICR_CTLR); 2177 writel_relaxed(val, rbase + GICR_CTLR);
2146 2178
2179 if (gic_rdists->has_vlpis) {
2180 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2181
2182 /*
2183 * It's possible for CPU to receive VLPIs before it is
2184 * sheduled as a vPE, especially for the first CPU, and the
2185 * VLPI with INTID larger than 2^(IDbits+1) will be considered
2186 * as out of range and dropped by GIC.
2187 * So we initialize IDbits to known value to avoid VLPI drop.
2188 */
2189 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2190 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2191 smp_processor_id(), val);
2192 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2193
2194 /*
2195 * Also clear Valid bit of GICR_VPENDBASER, in case some
2196 * ancient programming gets left in and has possibility of
2197 * corrupting memory.
2198 */
2199 val = its_clear_vpend_valid(vlpi_base);
2200 WARN_ON(val & GICR_VPENDBASER_Dirty);
2201 }
2202
2147 /* Make sure the GIC has seen the above */ 2203 /* Make sure the GIC has seen the above */
2148 dsb(sy); 2204 dsb(sy);
2149out: 2205out:
@@ -2399,13 +2455,14 @@ static void its_free_device(struct its_device *its_dev)
2399 kfree(its_dev); 2455 kfree(its_dev);
2400} 2456}
2401 2457
2402static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) 2458static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
2403{ 2459{
2404 int idx; 2460 int idx;
2405 2461
2406 idx = find_first_zero_bit(dev->event_map.lpi_map, 2462 idx = bitmap_find_free_region(dev->event_map.lpi_map,
2407 dev->event_map.nr_lpis); 2463 dev->event_map.nr_lpis,
2408 if (idx == dev->event_map.nr_lpis) 2464 get_count_order(nvecs));
2465 if (idx < 0)
2409 return -ENOSPC; 2466 return -ENOSPC;
2410 2467
2411 *hwirq = dev->event_map.lpi_base + idx; 2468 *hwirq = dev->event_map.lpi_base + idx;
@@ -2421,6 +2478,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2421 struct its_device *its_dev; 2478 struct its_device *its_dev;
2422 struct msi_domain_info *msi_info; 2479 struct msi_domain_info *msi_info;
2423 u32 dev_id; 2480 u32 dev_id;
2481 int err = 0;
2424 2482
2425 /* 2483 /*
2426 * We ignore "dev" entierely, and rely on the dev_id that has 2484 * We ignore "dev" entierely, and rely on the dev_id that has
@@ -2443,6 +2501,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2443 return -EINVAL; 2501 return -EINVAL;
2444 } 2502 }
2445 2503
2504 mutex_lock(&its->dev_alloc_lock);
2446 its_dev = its_find_device(its, dev_id); 2505 its_dev = its_find_device(its, dev_id);
2447 if (its_dev) { 2506 if (its_dev) {
2448 /* 2507 /*
@@ -2450,18 +2509,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2450 * another alias (PCI bridge of some sort). No need to 2509 * another alias (PCI bridge of some sort). No need to
2451 * create the device. 2510 * create the device.
2452 */ 2511 */
2512 its_dev->shared = true;
2453 pr_debug("Reusing ITT for devID %x\n", dev_id); 2513 pr_debug("Reusing ITT for devID %x\n", dev_id);
2454 goto out; 2514 goto out;
2455 } 2515 }
2456 2516
2457 its_dev = its_create_device(its, dev_id, nvec, true); 2517 its_dev = its_create_device(its, dev_id, nvec, true);
2458 if (!its_dev) 2518 if (!its_dev) {
2459 return -ENOMEM; 2519 err = -ENOMEM;
2520 goto out;
2521 }
2460 2522
2461 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 2523 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
2462out: 2524out:
2525 mutex_unlock(&its->dev_alloc_lock);
2463 info->scratchpad[0].ptr = its_dev; 2526 info->scratchpad[0].ptr = its_dev;
2464 return 0; 2527 return err;
2465} 2528}
2466 2529
2467static struct msi_domain_ops its_msi_domain_ops = { 2530static struct msi_domain_ops its_msi_domain_ops = {
@@ -2501,21 +2564,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2501 int err; 2564 int err;
2502 int i; 2565 int i;
2503 2566
2504 for (i = 0; i < nr_irqs; i++) { 2567 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2505 err = its_alloc_device_irq(its_dev, &hwirq); 2568 if (err)
2506 if (err) 2569 return err;
2507 return err;
2508 2570
2509 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); 2571 for (i = 0; i < nr_irqs; i++) {
2572 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
2510 if (err) 2573 if (err)
2511 return err; 2574 return err;
2512 2575
2513 irq_domain_set_hwirq_and_chip(domain, virq + i, 2576 irq_domain_set_hwirq_and_chip(domain, virq + i,
2514 hwirq, &its_irq_chip, its_dev); 2577 hwirq + i, &its_irq_chip, its_dev);
2515 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); 2578 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
2516 pr_debug("ID:%d pID:%d vID:%d\n", 2579 pr_debug("ID:%d pID:%d vID:%d\n",
2517 (int)(hwirq - its_dev->event_map.lpi_base), 2580 (int)(hwirq + i - its_dev->event_map.lpi_base),
2518 (int) hwirq, virq + i); 2581 (int)(hwirq + i), virq + i);
2519 } 2582 }
2520 2583
2521 return 0; 2584 return 0;
@@ -2565,6 +2628,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2565{ 2628{
2566 struct irq_data *d = irq_domain_get_irq_data(domain, virq); 2629 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2567 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2630 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2631 struct its_node *its = its_dev->its;
2568 int i; 2632 int i;
2569 2633
2570 for (i = 0; i < nr_irqs; i++) { 2634 for (i = 0; i < nr_irqs; i++) {
@@ -2579,8 +2643,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2579 irq_domain_reset_irq_data(data); 2643 irq_domain_reset_irq_data(data);
2580 } 2644 }
2581 2645
2582 /* If all interrupts have been freed, start mopping the floor */ 2646 mutex_lock(&its->dev_alloc_lock);
2583 if (bitmap_empty(its_dev->event_map.lpi_map, 2647
2648 /*
2649 * If all interrupts have been freed, start mopping the
2650 * floor. This is conditionned on the device not being shared.
2651 */
2652 if (!its_dev->shared &&
2653 bitmap_empty(its_dev->event_map.lpi_map,
2584 its_dev->event_map.nr_lpis)) { 2654 its_dev->event_map.nr_lpis)) {
2585 its_lpi_free(its_dev->event_map.lpi_map, 2655 its_lpi_free(its_dev->event_map.lpi_map,
2586 its_dev->event_map.lpi_base, 2656 its_dev->event_map.lpi_base,
@@ -2592,6 +2662,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2592 its_free_device(its_dev); 2662 its_free_device(its_dev);
2593 } 2663 }
2594 2664
2665 mutex_unlock(&its->dev_alloc_lock);
2666
2595 irq_domain_free_irqs_parent(domain, virq, nr_irqs); 2667 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2596} 2668}
2597 2669
@@ -2754,26 +2826,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
2754static void its_vpe_deschedule(struct its_vpe *vpe) 2826static void its_vpe_deschedule(struct its_vpe *vpe)
2755{ 2827{
2756 void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 2828 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2757 u32 count = 1000000; /* 1s! */
2758 bool clean;
2759 u64 val; 2829 u64 val;
2760 2830
2761 /* We're being scheduled out */ 2831 val = its_clear_vpend_valid(vlpi_base);
2762 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2763 val &= ~GICR_VPENDBASER_Valid;
2764 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2765
2766 do {
2767 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2768 clean = !(val & GICR_VPENDBASER_Dirty);
2769 if (!clean) {
2770 count--;
2771 cpu_relax();
2772 udelay(1);
2773 }
2774 } while (!clean && count);
2775 2832
2776 if (unlikely(!clean && !count)) { 2833 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
2777 pr_err_ratelimited("ITS virtual pending table not cleaning\n"); 2834 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2778 vpe->idai = false; 2835 vpe->idai = false;
2779 vpe->pending_last = true; 2836 vpe->pending_last = true;
@@ -3516,6 +3573,7 @@ static int __init its_probe_one(struct resource *res,
3516 } 3573 }
3517 3574
3518 raw_spin_lock_init(&its->lock); 3575 raw_spin_lock_init(&its->lock);
3576 mutex_init(&its->dev_alloc_lock);
3519 INIT_LIST_HEAD(&its->entry); 3577 INIT_LIST_HEAD(&its->entry);
3520 INIT_LIST_HEAD(&its->its_device_list); 3578 INIT_LIST_HEAD(&its->its_device_list);
3521 typer = gic_read_typer(its_base + GITS_TYPER); 3579 typer = gic_read_typer(its_base + GITS_TYPER);
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index ad70e7c416e3..fbfa7ff6deb1 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -24,7 +24,7 @@ struct mbi_range {
24 unsigned long *bm; 24 unsigned long *bm;
25}; 25};
26 26
27static struct mutex mbi_lock; 27static DEFINE_MUTEX(mbi_lock);
28static phys_addr_t mbi_phys_base; 28static phys_addr_t mbi_phys_base;
29static struct mbi_range *mbi_ranges; 29static struct mbi_range *mbi_ranges;
30static unsigned int mbi_range_nr; 30static unsigned int mbi_range_nr;
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
index e9256dee1a45..8b81271c823c 100644
--- a/drivers/irqchip/irq-madera.c
+++ b/drivers/irqchip/irq-madera.c
@@ -7,7 +7,6 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/gpio.h>
11#include <linux/interrupt.h> 10#include <linux/interrupt.h>
12#include <linux/irq.h> 11#include <linux/irq.h>
13#include <linux/irqdomain.h> 12#include <linux/irqdomain.h>
@@ -16,7 +15,6 @@
16#include <linux/slab.h> 15#include <linux/slab.h>
17#include <linux/of.h> 16#include <linux/of.h>
18#include <linux/of_device.h> 17#include <linux/of_device.h>
19#include <linux/of_gpio.h>
20#include <linux/of_irq.h> 18#include <linux/of_irq.h>
21#include <linux/irqchip/irq-madera.h> 19#include <linux/irqchip/irq-madera.h>
22#include <linux/mfd/madera/core.h> 20#include <linux/mfd/madera/core.h>
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 25f32e1d7764..3496b61a312a 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -34,6 +34,9 @@
34#define SEL_INT_PENDING (1 << 6) 34#define SEL_INT_PENDING (1 << 6)
35#define SEL_INT_NUM_MASK 0x3f 35#define SEL_INT_NUM_MASK 0x3f
36 36
37#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
38#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
39
37struct icu_chip_data { 40struct icu_chip_data {
38 int nr_irqs; 41 int nr_irqs;
39 unsigned int virq_base; 42 unsigned int virq_base;
@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
190static const struct mmp_intc_conf mmp2_conf = { 193static const struct mmp_intc_conf mmp2_conf = {
191 .conf_enable = 0x20, 194 .conf_enable = 0x20,
192 .conf_disable = 0x0, 195 .conf_disable = 0x0,
193 .conf_mask = 0x7f, 196 .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
197 MMP2_ICU_INT_ROUTE_PJ4_FIQ,
194}; 198};
195 199
196static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) 200static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 6edfd4bfa169..a93296b9b45d 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -822,6 +822,7 @@ out_unmap:
822static const struct irq_domain_ops stm32_exti_h_domain_ops = { 822static const struct irq_domain_ops stm32_exti_h_domain_ops = {
823 .alloc = stm32_exti_h_domain_alloc, 823 .alloc = stm32_exti_h_domain_alloc,
824 .free = irq_domain_free_irqs_common, 824 .free = irq_domain_free_irqs_common,
825 .xlate = irq_domain_xlate_twocell,
825}; 826};
826 827
827static int 828static int
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 5385f5768345..27933338f7b3 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -71,14 +71,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d)
71 unsigned int mask = 1u << d->hwirq; 71 unsigned int mask = 1u << d->hwirq;
72 72
73 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | 73 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
74 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { 74 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
75 set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - 75 unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
76 HW_IRQ_MX_BASE), MIENG); 76
77 } else { 77 if (ext_irq >= HW_IRQ_MX_BASE) {
78 mask = __this_cpu_read(cached_irq_mask) & ~mask; 78 set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
79 __this_cpu_write(cached_irq_mask, mask); 79 return;
80 xtensa_set_sr(mask, intenable); 80 }
81 } 81 }
82 mask = __this_cpu_read(cached_irq_mask) & ~mask;
83 __this_cpu_write(cached_irq_mask, mask);
84 xtensa_set_sr(mask, intenable);
82} 85}
83 86
84static void xtensa_mx_irq_unmask(struct irq_data *d) 87static void xtensa_mx_irq_unmask(struct irq_data *d)
@@ -86,14 +89,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
86 unsigned int mask = 1u << d->hwirq; 89 unsigned int mask = 1u << d->hwirq;
87 90
88 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | 91 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
89 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { 92 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
90 set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - 93 unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
91 HW_IRQ_MX_BASE), MIENGSET); 94
92 } else { 95 if (ext_irq >= HW_IRQ_MX_BASE) {
93 mask |= __this_cpu_read(cached_irq_mask); 96 set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
94 __this_cpu_write(cached_irq_mask, mask); 97 return;
95 xtensa_set_sr(mask, intenable); 98 }
96 } 99 }
100 mask |= __this_cpu_read(cached_irq_mask);
101 __this_cpu_write(cached_irq_mask, mask);
102 xtensa_set_sr(mask, intenable);
97} 103}
98 104
99static void xtensa_mx_irq_enable(struct irq_data *d) 105static void xtensa_mx_irq_enable(struct irq_data *d)
@@ -113,7 +119,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d)
113 119
114static int xtensa_mx_irq_retrigger(struct irq_data *d) 120static int xtensa_mx_irq_retrigger(struct irq_data *d)
115{ 121{
116 xtensa_set_sr(1 << d->hwirq, intset); 122 unsigned int mask = 1u << d->hwirq;
123
124 if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
125 return 0;
126 xtensa_set_sr(mask, intset);
117 return 1; 127 return 1;
118} 128}
119 129
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index c200234dd2c9..ab12328be5ee 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -70,7 +70,11 @@ static void xtensa_irq_ack(struct irq_data *d)
70 70
71static int xtensa_irq_retrigger(struct irq_data *d) 71static int xtensa_irq_retrigger(struct irq_data *d)
72{ 72{
73 xtensa_set_sr(1 << d->hwirq, intset); 73 unsigned int mask = 1u << d->hwirq;
74
75 if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
76 return 0;
77 xtensa_set_sr(mask, intset);
74 return 1; 78 return 1;
75} 79}
76 80
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4ac378e48902..40ca1e8fa09f 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
423 int i, j; 423 int i, j;
424 424
425 for (j = 0; j < AVM_MAXVERSION; j++) 425 for (j = 0; j < AVM_MAXVERSION; j++)
426 cinfo->version[j] = "\0\0" + 1; 426 cinfo->version[j] = "";
427 for (i = 0, j = 0; 427 for (i = 0, j = 0;
428 j < AVM_MAXVERSION && i < cinfo->versionlen; 428 j < AVM_MAXVERSION && i < cinfo->versionlen;
429 j++, i += cinfo->versionbuf[i] + 1) 429 j++, i += cinfo->versionbuf[i] + 1)
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 6d05946b445e..124ff530da82 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -262,8 +262,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
262 struct dchannel *dch = &hw->dch; 262 struct dchannel *dch = &hw->dch;
263 int i; 263 int i;
264 264
265 phi = kzalloc(sizeof(struct ph_info) + 265 phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
266 dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC);
267 phi->dch.ch.protocol = hw->protocol; 266 phi->dch.ch.protocol = hw->protocol;
268 phi->dch.ch.Flags = dch->Flags; 267 phi->dch.ch.Flags = dch->Flags;
269 phi->dch.state = dch->state; 268 phi->dch.state = dch->state;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 1b2239c1d569..dc1cded716c1 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
1437{ 1437{
1438 modem_info *info = (modem_info *) tty->driver_data; 1438 modem_info *info = (modem_info *) tty->driver_data;
1439 1439
1440 mutex_lock(&modem_info_mutex);
1440 if (!old_termios) 1441 if (!old_termios)
1441 isdn_tty_change_speed(info); 1442 isdn_tty_change_speed(info);
1442 else { 1443 else {
1443 if (tty->termios.c_cflag == old_termios->c_cflag && 1444 if (tty->termios.c_cflag == old_termios->c_cflag &&
1444 tty->termios.c_ispeed == old_termios->c_ispeed && 1445 tty->termios.c_ispeed == old_termios->c_ispeed &&
1445 tty->termios.c_ospeed == old_termios->c_ospeed) 1446 tty->termios.c_ospeed == old_termios->c_ospeed) {
1447 mutex_unlock(&modem_info_mutex);
1446 return; 1448 return;
1449 }
1447 isdn_tty_change_speed(info); 1450 isdn_tty_change_speed(info);
1448 } 1451 }
1452 mutex_unlock(&modem_info_mutex);
1449} 1453}
1450 1454
1451/* 1455/*
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 211ed6cffd10..578978711887 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
170 spin_lock_irqsave(&timer->dev->lock, flags); 170 spin_lock_irqsave(&timer->dev->lock, flags);
171 if (timer->id >= 0) 171 if (timer->id >= 0)
172 list_move_tail(&timer->list, &timer->dev->expired); 172 list_move_tail(&timer->list, &timer->dev->expired);
173 spin_unlock_irqrestore(&timer->dev->lock, flags);
174 wake_up_interruptible(&timer->dev->wait); 173 wake_up_interruptible(&timer->dev->wait);
174 spin_unlock_irqrestore(&timer->dev->lock, flags);
175} 175}
176 176
177static int 177static int
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index a2e74feee2b2..fd64df5a57a5 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
318 318
319 /* Let the programs run for couple of ms and check the engine status */ 319 /* Let the programs run for couple of ms and check the engine status */
320 usleep_range(3000, 6000); 320 usleep_range(3000, 6000);
321 lp55xx_read(chip, LP5523_REG_STATUS, &status); 321 ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
322 if (ret)
323 return ret;
322 status &= LP5523_ENG_STATUS_MASK; 324 status &= LP5523_ENG_STATUS_MASK;
323 325
324 if (status != LP5523_ENG_STATUS_MASK) { 326 if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index d713271ebf7c..a64116586b4c 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan)
1396 1396
1397 /* Clear ring flush state */ 1397 /* Clear ring flush state */
1398 timeout = 1000; /* timeout of 1s */ 1398 timeout = 1000; /* timeout of 1s */
1399 writel_relaxed(0x0, ring + RING_CONTROL); 1399 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1400 do { 1400 do {
1401 if (!(readl_relaxed(ring + RING_FLUSH_DONE) & 1401 if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
1402 FLUSH_DONE_MASK)) 1402 FLUSH_DONE_MASK))
1403 break; 1403 break;
1404 mdelay(1); 1404 mdelay(1);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index c6a7d4582dc6..38d9df3fb199 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
310 310
311 return ret; 311 return ret;
312} 312}
313EXPORT_SYMBOL_GPL(mbox_flush);
313 314
314/** 315/**
315 * mbox_request_channel - Request a mailbox channel. 316 * mbox_request_channel - Request a mailbox channel.
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0ff22159a0ca..dd538e6b2748 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
932 if (IS_ERR(bip)) 932 if (IS_ERR(bip))
933 return PTR_ERR(bip); 933 return PTR_ERR(bip);
934 934
935 tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); 935 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
936 936
937 bip->bip_iter.bi_size = tag_len; 937 bip->bip_iter.bi_size = tag_len;
938 bip->bip_iter.bi_sector = io->cc->start + io->sector; 938 bip->bip_iter.bi_sector = io->cc->start + io->sector;
@@ -2414,9 +2414,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
2414 * capi:cipher_api_spec-iv:ivopts 2414 * capi:cipher_api_spec-iv:ivopts
2415 */ 2415 */
2416 tmp = &cipher_in[strlen("capi:")]; 2416 tmp = &cipher_in[strlen("capi:")];
2417 cipher_api = strsep(&tmp, "-"); 2417
2418 *ivmode = strsep(&tmp, ":"); 2418 /* Separate IV options if present, it can contain another '-' in hash name */
2419 *ivopts = tmp; 2419 *ivopts = strrchr(tmp, ':');
2420 if (*ivopts) {
2421 **ivopts = '\0';
2422 (*ivopts)++;
2423 }
2424 /* Parse IV mode */
2425 *ivmode = strrchr(tmp, '-');
2426 if (*ivmode) {
2427 **ivmode = '\0';
2428 (*ivmode)++;
2429 }
2430 /* The rest is crypto API spec */
2431 cipher_api = tmp;
2420 2432
2421 if (*ivmode && !strcmp(*ivmode, "lmk")) 2433 if (*ivmode && !strcmp(*ivmode, "lmk"))
2422 cc->tfms_count = 64; 2434 cc->tfms_count = 64;
@@ -2486,11 +2498,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
2486 goto bad_mem; 2498 goto bad_mem;
2487 2499
2488 chainmode = strsep(&tmp, "-"); 2500 chainmode = strsep(&tmp, "-");
2489 *ivopts = strsep(&tmp, "-"); 2501 *ivmode = strsep(&tmp, ":");
2490 *ivmode = strsep(&*ivopts, ":"); 2502 *ivopts = tmp;
2491
2492 if (tmp)
2493 DMWARN("Ignoring unexpected additional cipher options");
2494 2503
2495 /* 2504 /*
2496 * For compatibility with the original dm-crypt mapping format, if 2505 * For compatibility with the original dm-crypt mapping format, if
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4eb5f8c56535..a20531e5f3b4 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
131static void rq_completed(struct mapped_device *md) 131static void rq_completed(struct mapped_device *md)
132{ 132{
133 /* nudge anyone waiting on suspend queue */ 133 /* nudge anyone waiting on suspend queue */
134 if (unlikely(waitqueue_active(&md->wait))) 134 if (unlikely(wq_has_sleeper(&md->wait)))
135 wake_up(&md->wait); 135 wake_up(&md->wait);
136 136
137 /* 137 /*
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 20b0776e39ef..ed3caceaed07 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
1678 return r; 1678 return r;
1679} 1679}
1680 1680
1681int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) 1681int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
1682{ 1682{
1683 int r; 1683 int r;
1684 uint32_t ref_count; 1684 uint32_t ref_count;
@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
1686 down_read(&pmd->root_lock); 1686 down_read(&pmd->root_lock);
1687 r = dm_sm_get_count(pmd->data_sm, b, &ref_count); 1687 r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
1688 if (!r) 1688 if (!r)
1689 *result = (ref_count != 0); 1689 *result = (ref_count > 1);
1690 up_read(&pmd->root_lock); 1690 up_read(&pmd->root_lock);
1691 1691
1692 return r; 1692 return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 35e954ea20a9..f6be0d733c20 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
195 195
196int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); 196int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
197 197
198int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); 198int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
199 199
200int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); 200int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
201int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); 201int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index dadd9696340c..e83b63608262 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -257,6 +257,7 @@ struct pool {
257 257
258 spinlock_t lock; 258 spinlock_t lock;
259 struct bio_list deferred_flush_bios; 259 struct bio_list deferred_flush_bios;
260 struct bio_list deferred_flush_completions;
260 struct list_head prepared_mappings; 261 struct list_head prepared_mappings;
261 struct list_head prepared_discards; 262 struct list_head prepared_discards;
262 struct list_head prepared_discards_pt2; 263 struct list_head prepared_discards_pt2;
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
956 mempool_free(m, &m->tc->pool->mapping_pool); 957 mempool_free(m, &m->tc->pool->mapping_pool);
957} 958}
958 959
960static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
961{
962 struct pool *pool = tc->pool;
963 unsigned long flags;
964
965 /*
966 * If the bio has the REQ_FUA flag set we must commit the metadata
967 * before signaling its completion.
968 */
969 if (!bio_triggers_commit(tc, bio)) {
970 bio_endio(bio);
971 return;
972 }
973
974 /*
975 * Complete bio with an error if earlier I/O caused changes to the
976 * metadata that can't be committed, e.g, due to I/O errors on the
977 * metadata device.
978 */
979 if (dm_thin_aborted_changes(tc->td)) {
980 bio_io_error(bio);
981 return;
982 }
983
984 /*
985 * Batch together any bios that trigger commits and then issue a
986 * single commit for them in process_deferred_bios().
987 */
988 spin_lock_irqsave(&pool->lock, flags);
989 bio_list_add(&pool->deferred_flush_completions, bio);
990 spin_unlock_irqrestore(&pool->lock, flags);
991}
992
959static void process_prepared_mapping(struct dm_thin_new_mapping *m) 993static void process_prepared_mapping(struct dm_thin_new_mapping *m)
960{ 994{
961 struct thin_c *tc = m->tc; 995 struct thin_c *tc = m->tc;
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
988 */ 1022 */
989 if (bio) { 1023 if (bio) {
990 inc_remap_and_issue_cell(tc, m->cell, m->data_block); 1024 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
991 bio_endio(bio); 1025 complete_overwrite_bio(tc, bio);
992 } else { 1026 } else {
993 inc_all_io_entry(tc->pool, m->cell->holder); 1027 inc_all_io_entry(tc->pool, m->cell->holder);
994 remap_and_issue(tc, m->cell->holder, m->data_block); 1028 remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1048,7 +1082,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1048 * passdown we have to check that these blocks are now unused. 1082 * passdown we have to check that these blocks are now unused.
1049 */ 1083 */
1050 int r = 0; 1084 int r = 0;
1051 bool used = true; 1085 bool shared = true;
1052 struct thin_c *tc = m->tc; 1086 struct thin_c *tc = m->tc;
1053 struct pool *pool = tc->pool; 1087 struct pool *pool = tc->pool;
1054 dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin; 1088 dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
@@ -1058,11 +1092,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1058 while (b != end) { 1092 while (b != end) {
1059 /* find start of unmapped run */ 1093 /* find start of unmapped run */
1060 for (; b < end; b++) { 1094 for (; b < end; b++) {
1061 r = dm_pool_block_is_used(pool->pmd, b, &used); 1095 r = dm_pool_block_is_shared(pool->pmd, b, &shared);
1062 if (r) 1096 if (r)
1063 goto out; 1097 goto out;
1064 1098
1065 if (!used) 1099 if (!shared)
1066 break; 1100 break;
1067 } 1101 }
1068 1102
@@ -1071,11 +1105,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
1071 1105
1072 /* find end of run */ 1106 /* find end of run */
1073 for (e = b + 1; e != end; e++) { 1107 for (e = b + 1; e != end; e++) {
1074 r = dm_pool_block_is_used(pool->pmd, e, &used); 1108 r = dm_pool_block_is_shared(pool->pmd, e, &shared);
1075 if (r) 1109 if (r)
1076 goto out; 1110 goto out;
1077 1111
1078 if (used) 1112 if (shared)
1079 break; 1113 break;
1080 } 1114 }
1081 1115
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
2317{ 2351{
2318 unsigned long flags; 2352 unsigned long flags;
2319 struct bio *bio; 2353 struct bio *bio;
2320 struct bio_list bios; 2354 struct bio_list bios, bio_completions;
2321 struct thin_c *tc; 2355 struct thin_c *tc;
2322 2356
2323 tc = get_first_thin(pool); 2357 tc = get_first_thin(pool);
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
2328 } 2362 }
2329 2363
2330 /* 2364 /*
2331 * If there are any deferred flush bios, we must commit 2365 * If there are any deferred flush bios, we must commit the metadata
2332 * the metadata before issuing them. 2366 * before issuing them or signaling their completion.
2333 */ 2367 */
2334 bio_list_init(&bios); 2368 bio_list_init(&bios);
2369 bio_list_init(&bio_completions);
2370
2335 spin_lock_irqsave(&pool->lock, flags); 2371 spin_lock_irqsave(&pool->lock, flags);
2336 bio_list_merge(&bios, &pool->deferred_flush_bios); 2372 bio_list_merge(&bios, &pool->deferred_flush_bios);
2337 bio_list_init(&pool->deferred_flush_bios); 2373 bio_list_init(&pool->deferred_flush_bios);
2374
2375 bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
2376 bio_list_init(&pool->deferred_flush_completions);
2338 spin_unlock_irqrestore(&pool->lock, flags); 2377 spin_unlock_irqrestore(&pool->lock, flags);
2339 2378
2340 if (bio_list_empty(&bios) && 2379 if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
2341 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) 2380 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
2342 return; 2381 return;
2343 2382
2344 if (commit(pool)) { 2383 if (commit(pool)) {
2384 bio_list_merge(&bios, &bio_completions);
2385
2345 while ((bio = bio_list_pop(&bios))) 2386 while ((bio = bio_list_pop(&bios)))
2346 bio_io_error(bio); 2387 bio_io_error(bio);
2347 return; 2388 return;
2348 } 2389 }
2349 pool->last_commit_jiffies = jiffies; 2390 pool->last_commit_jiffies = jiffies;
2350 2391
2392 while ((bio = bio_list_pop(&bio_completions)))
2393 bio_endio(bio);
2394
2351 while ((bio = bio_list_pop(&bios))) 2395 while ((bio = bio_list_pop(&bios)))
2352 generic_make_request(bio); 2396 generic_make_request(bio);
2353} 2397}
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2954 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); 2998 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2955 spin_lock_init(&pool->lock); 2999 spin_lock_init(&pool->lock);
2956 bio_list_init(&pool->deferred_flush_bios); 3000 bio_list_init(&pool->deferred_flush_bios);
3001 bio_list_init(&pool->deferred_flush_completions);
2957 INIT_LIST_HEAD(&pool->prepared_mappings); 3002 INIT_LIST_HEAD(&pool->prepared_mappings);
2958 INIT_LIST_HEAD(&pool->prepared_discards); 3003 INIT_LIST_HEAD(&pool->prepared_discards);
2959 INIT_LIST_HEAD(&pool->prepared_discards_pt2); 3004 INIT_LIST_HEAD(&pool->prepared_discards_pt2);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d67c95ef8d7e..515e6af9bed2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io)
699 true, duration, &io->stats_aux); 699 true, duration, &io->stats_aux);
700 700
701 /* nudge anyone waiting on suspend queue */ 701 /* nudge anyone waiting on suspend queue */
702 if (unlikely(waitqueue_active(&md->wait))) 702 if (unlikely(wq_has_sleeper(&md->wait)))
703 wake_up(&md->wait); 703 wake_up(&md->wait);
704} 704}
705 705
@@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1320 1320
1321 __bio_clone_fast(clone, bio); 1321 __bio_clone_fast(clone, bio);
1322 1322
1323 if (unlikely(bio_integrity(bio) != NULL)) { 1323 if (bio_integrity(bio)) {
1324 int r; 1324 int r;
1325 1325
1326 if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1326 if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
@@ -1339,7 +1339,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1339 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1339 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1340 clone->bi_iter.bi_size = to_bytes(len); 1340 clone->bi_iter.bi_size = to_bytes(len);
1341 1341
1342 if (unlikely(bio_integrity(bio) != NULL)) 1342 if (bio_integrity(bio))
1343 bio_integrity_trim(clone); 1343 bio_integrity_trim(clone);
1344 1344
1345 return 0; 1345 return 0;
@@ -1588,6 +1588,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1588 ci->sector = bio->bi_iter.bi_sector; 1588 ci->sector = bio->bi_iter.bi_sector;
1589} 1589}
1590 1590
1591#define __dm_part_stat_sub(part, field, subnd) \
1592 (part_stat_get(part, field) -= (subnd))
1593
1591/* 1594/*
1592 * Entry point to split a bio into clones and submit them to the targets. 1595 * Entry point to split a bio into clones and submit them to the targets.
1593 */ 1596 */
@@ -1642,7 +1645,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1642 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1645 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
1643 GFP_NOIO, &md->queue->bio_split); 1646 GFP_NOIO, &md->queue->bio_split);
1644 ci.io->orig_bio = b; 1647 ci.io->orig_bio = b;
1648
1649 /*
1650 * Adjust IO stats for each split, otherwise upon queue
1651 * reentry there will be redundant IO accounting.
1652 * NOTE: this is a stop-gap fix, a proper fix involves
1653 * significant refactoring of DM core's bio splitting
1654 * (by eliminating DM's splitting and just using bio_split)
1655 */
1656 part_stat_lock();
1657 __dm_part_stat_sub(&dm_disk(md)->part0,
1658 sectors[op_stat_group(bio_op(bio))], ci.sector_count);
1659 part_stat_unlock();
1660
1645 bio_chain(b, bio); 1661 bio_chain(b, bio);
1662 trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
1646 ret = generic_make_request(bio); 1663 ret = generic_make_request(bio);
1647 break; 1664 break;
1648 } 1665 }
@@ -1713,6 +1730,15 @@ out:
1713 return ret; 1730 return ret;
1714} 1731}
1715 1732
1733static blk_qc_t dm_process_bio(struct mapped_device *md,
1734 struct dm_table *map, struct bio *bio)
1735{
1736 if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
1737 return __process_bio(md, map, bio);
1738 else
1739 return __split_and_process_bio(md, map, bio);
1740}
1741
1716static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1742static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
1717{ 1743{
1718 struct mapped_device *md = q->queuedata; 1744 struct mapped_device *md = q->queuedata;
@@ -1733,10 +1759,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
1733 return ret; 1759 return ret;
1734 } 1760 }
1735 1761
1736 if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) 1762 ret = dm_process_bio(md, map, bio);
1737 ret = __process_bio(md, map, bio);
1738 else
1739 ret = __split_and_process_bio(md, map, bio);
1740 1763
1741 dm_put_live_table(md, srcu_idx); 1764 dm_put_live_table(md, srcu_idx);
1742 return ret; 1765 return ret;
@@ -2415,9 +2438,9 @@ static void dm_wq_work(struct work_struct *work)
2415 break; 2438 break;
2416 2439
2417 if (dm_request_based(md)) 2440 if (dm_request_based(md))
2418 generic_make_request(c); 2441 (void) generic_make_request(c);
2419 else 2442 else
2420 __split_and_process_bio(md, map, c); 2443 (void) dm_process_bio(md, map, c);
2421 } 2444 }
2422 2445
2423 dm_put_live_table(md, srcu_idx); 2446 dm_put_live_table(md, srcu_idx);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fd4af4de03b4..05ffffb8b769 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -207,15 +207,10 @@ static bool create_on_open = true;
207struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 207struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
208 struct mddev *mddev) 208 struct mddev *mddev)
209{ 209{
210 struct bio *b;
211
212 if (!mddev || !bioset_initialized(&mddev->bio_set)) 210 if (!mddev || !bioset_initialized(&mddev->bio_set))
213 return bio_alloc(gfp_mask, nr_iovecs); 211 return bio_alloc(gfp_mask, nr_iovecs);
214 212
215 b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); 213 return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
216 if (!b)
217 return NULL;
218 return b;
219} 214}
220EXPORT_SYMBOL_GPL(bio_alloc_mddev); 215EXPORT_SYMBOL_GPL(bio_alloc_mddev);
221 216
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1d54109071cc..fa47249fa3e4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
1863 reschedule_retry(r1_bio); 1863 reschedule_retry(r1_bio);
1864} 1864}
1865 1865
1866static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1867{
1868 sector_t sync_blocks = 0;
1869 sector_t s = r1_bio->sector;
1870 long sectors_to_go = r1_bio->sectors;
1871
1872 /* make sure these bits don't get cleared. */
1873 do {
1874 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1875 s += sync_blocks;
1876 sectors_to_go -= sync_blocks;
1877 } while (sectors_to_go > 0);
1878}
1879
1866static void end_sync_write(struct bio *bio) 1880static void end_sync_write(struct bio *bio)
1867{ 1881{
1868 int uptodate = !bio->bi_status; 1882 int uptodate = !bio->bi_status;
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
1874 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; 1888 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1875 1889
1876 if (!uptodate) { 1890 if (!uptodate) {
1877 sector_t sync_blocks = 0; 1891 abort_sync_write(mddev, r1_bio);
1878 sector_t s = r1_bio->sector;
1879 long sectors_to_go = r1_bio->sectors;
1880 /* make sure these bits doesn't get cleared. */
1881 do {
1882 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1883 s += sync_blocks;
1884 sectors_to_go -= sync_blocks;
1885 } while (sectors_to_go > 0);
1886 set_bit(WriteErrorSeen, &rdev->flags); 1892 set_bit(WriteErrorSeen, &rdev->flags);
1887 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 1893 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1888 set_bit(MD_RECOVERY_NEEDED, & 1894 set_bit(MD_RECOVERY_NEEDED, &
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2172 (i == r1_bio->read_disk || 2178 (i == r1_bio->read_disk ||
2173 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) 2179 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2174 continue; 2180 continue;
2175 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) 2181 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2182 abort_sync_write(mddev, r1_bio);
2176 continue; 2183 continue;
2184 }
2177 2185
2178 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2186 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2179 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) 2187 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ec3a5ef7fee0..cbbe6b6535be 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
1935} 1935}
1936 1936
1937static struct stripe_head * 1937static struct stripe_head *
1938r5c_recovery_alloc_stripe(struct r5conf *conf, 1938r5c_recovery_alloc_stripe(
1939 sector_t stripe_sect) 1939 struct r5conf *conf,
1940 sector_t stripe_sect,
1941 int noblock)
1940{ 1942{
1941 struct stripe_head *sh; 1943 struct stripe_head *sh;
1942 1944
1943 sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); 1945 sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
1944 if (!sh) 1946 if (!sh)
1945 return NULL; /* no more stripe available */ 1947 return NULL; /* no more stripe available */
1946 1948
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
2150 stripe_sect); 2152 stripe_sect);
2151 2153
2152 if (!sh) { 2154 if (!sh) {
2153 sh = r5c_recovery_alloc_stripe(conf, stripe_sect); 2155 sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2154 /* 2156 /*
2155 * cannot get stripe from raid5_get_active_stripe 2157 * cannot get stripe from raid5_get_active_stripe
2156 * try replay some stripes 2158 * try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
2159 r5c_recovery_replay_stripes( 2161 r5c_recovery_replay_stripes(
2160 cached_stripe_list, ctx); 2162 cached_stripe_list, ctx);
2161 sh = r5c_recovery_alloc_stripe( 2163 sh = r5c_recovery_alloc_stripe(
2162 conf, stripe_sect); 2164 conf, stripe_sect, 1);
2163 } 2165 }
2164 if (!sh) { 2166 if (!sh) {
2167 int new_size = conf->min_nr_stripes * 2;
2165 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", 2168 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
2166 mdname(mddev), 2169 mdname(mddev),
2167 conf->min_nr_stripes * 2); 2170 new_size);
2168 raid5_set_cache_size(mddev, 2171 ret = raid5_set_cache_size(mddev, new_size);
2169 conf->min_nr_stripes * 2); 2172 if (conf->min_nr_stripes <= new_size / 2) {
2170 sh = r5c_recovery_alloc_stripe(conf, 2173 pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2171 stripe_sect); 2174 mdname(mddev),
2175 ret,
2176 new_size,
2177 conf->min_nr_stripes,
2178 conf->max_nr_stripes);
2179 return -ENOMEM;
2180 }
2181 sh = r5c_recovery_alloc_stripe(
2182 conf, stripe_sect, 0);
2172 } 2183 }
2173 if (!sh) { 2184 if (!sh) {
2174 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n", 2185 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2175 mdname(mddev)); 2186 mdname(mddev));
2176 return -ENOMEM; 2187 return -ENOMEM;
2177 } 2188 }
2178 list_add_tail(&sh->lru, cached_stripe_list); 2189 list_add_tail(&sh->lru, cached_stripe_list);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4990f0319f6c..cecea901ab8c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
6369int 6369int
6370raid5_set_cache_size(struct mddev *mddev, int size) 6370raid5_set_cache_size(struct mddev *mddev, int size)
6371{ 6371{
6372 int result = 0;
6372 struct r5conf *conf = mddev->private; 6373 struct r5conf *conf = mddev->private;
6373 6374
6374 if (size <= 16 || size > 32768) 6375 if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
6385 6386
6386 mutex_lock(&conf->cache_size_mutex); 6387 mutex_lock(&conf->cache_size_mutex);
6387 while (size > conf->max_nr_stripes) 6388 while (size > conf->max_nr_stripes)
6388 if (!grow_one_stripe(conf, GFP_KERNEL)) 6389 if (!grow_one_stripe(conf, GFP_KERNEL)) {
6390 conf->min_nr_stripes = conf->max_nr_stripes;
6391 result = -ENOMEM;
6389 break; 6392 break;
6393 }
6390 mutex_unlock(&conf->cache_size_mutex); 6394 mutex_unlock(&conf->cache_size_mutex);
6391 6395
6392 return 0; 6396 return result;
6393} 6397}
6394EXPORT_SYMBOL(raid5_set_cache_size); 6398EXPORT_SYMBOL(raid5_set_cache_size);
6395 6399
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 447baaebca44..cdb79ae2d8dc 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -218,8 +218,8 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
218{ 218{
219 struct device *dev = &cio2->pci_dev->dev; 219 struct device *dev = &cio2->pci_dev->dev;
220 220
221 q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, 221 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
222 GFP_KERNEL); 222 GFP_KERNEL);
223 if (!q->fbpt) 223 if (!q->fbpt)
224 return -ENOMEM; 224 return -ENOMEM;
225 225
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index e80123cba406..060c0ad6243a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
49 struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data; 49 struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
50 struct device *dev = &ctx->dev->plat_dev->dev; 50 struct device *dev = &ctx->dev->plat_dev->dev;
51 51
52 mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); 52 mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
53 if (!mem->va) { 53 if (!mem->va) {
54 mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev), 54 mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
55 size); 55 size);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index d01821a6906a..89d9c4c21037 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
807 struct vb2_v4l2_buffer *vbuf; 807 struct vb2_v4l2_buffer *vbuf;
808 unsigned long flags; 808 unsigned long flags;
809 809
810 cancel_delayed_work_sync(&dev->work_run); 810 if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
811 cancel_delayed_work_sync(&dev->work_run);
812
811 for (;;) { 813 for (;;) {
812 if (V4L2_TYPE_IS_OUTPUT(q->type)) 814 if (V4L2_TYPE_IS_OUTPUT(q->type))
813 vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); 815 vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 1441a73ce64c..90aad465f9ed 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only)
287 const struct v4l2_window *win; 287 const struct v4l2_window *win;
288 const struct v4l2_sdr_format *sdr; 288 const struct v4l2_sdr_format *sdr;
289 const struct v4l2_meta_format *meta; 289 const struct v4l2_meta_format *meta;
290 u32 planes;
290 unsigned i; 291 unsigned i;
291 292
292 pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); 293 pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only)
317 prt_names(mp->field, v4l2_field_names), 318 prt_names(mp->field, v4l2_field_names),
318 mp->colorspace, mp->num_planes, mp->flags, 319 mp->colorspace, mp->num_planes, mp->flags,
319 mp->ycbcr_enc, mp->quantization, mp->xfer_func); 320 mp->ycbcr_enc, mp->quantization, mp->xfer_func);
320 for (i = 0; i < mp->num_planes; i++) 321 planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
322 for (i = 0; i < planes; i++)
321 printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, 323 printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
322 mp->plane_fmt[i].bytesperline, 324 mp->plane_fmt[i].bytesperline,
323 mp->plane_fmt[i].sizeimage); 325 mp->plane_fmt[i].sizeimage);
@@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1551 if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) 1553 if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
1552 break; 1554 break;
1553 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1555 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1556 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1557 break;
1554 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1558 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1555 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1559 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1560 bytesperline);
1556 return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); 1561 return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
1557 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1562 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1558 if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) 1563 if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
@@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1581 if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) 1586 if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
1582 break; 1587 break;
1583 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1588 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1589 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1590 break;
1584 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1591 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1585 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1592 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1593 bytesperline);
1586 return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); 1594 return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
1587 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 1595 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
1588 if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) 1596 if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
@@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1648 if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) 1656 if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
1649 break; 1657 break;
1650 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1658 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1659 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1660 break;
1651 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1661 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1652 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1662 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1663 bytesperline);
1653 return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); 1664 return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
1654 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1665 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1655 if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) 1666 if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
@@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1678 if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) 1689 if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
1679 break; 1690 break;
1680 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1691 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1692 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1693 break;
1681 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1694 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1682 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1695 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1696 bytesperline);
1683 return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); 1697 return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
1684 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 1698 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
1685 if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) 1699 if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 8c5dfdce4326..76f9909cf396 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -102,6 +102,7 @@ config MFD_AAT2870_CORE
102config MFD_AT91_USART 102config MFD_AT91_USART
103 tristate "AT91 USART Driver" 103 tristate "AT91 USART Driver"
104 select MFD_CORE 104 select MFD_CORE
105 depends on ARCH_AT91 || COMPILE_TEST
105 help 106 help
106 Select this to get support for AT91 USART IP. This is a wrapper 107 Select this to get support for AT91 USART IP. This is a wrapper
107 over at91-usart-serial driver and usart-spi-driver. Only one function 108 over at91-usart-serial driver and usart-spi-driver. Only one function
@@ -1418,7 +1419,7 @@ config MFD_TPS65217
1418 1419
1419config MFD_TPS68470 1420config MFD_TPS68470
1420 bool "TI TPS68470 Power Management / LED chips" 1421 bool "TI TPS68470 Power Management / LED chips"
1421 depends on ACPI && I2C=y 1422 depends on ACPI && PCI && I2C=y
1422 select MFD_CORE 1423 select MFD_CORE
1423 select REGMAP_I2C 1424 select REGMAP_I2C
1424 select I2C_DESIGNWARE_PLATFORM 1425 select I2C_DESIGNWARE_PLATFORM
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 30d09d177171..11ab17f64c64 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
261 mutex_unlock(&ab8500->lock); 261 mutex_unlock(&ab8500->lock);
262 dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); 262 dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
263 263
264 return ret; 264 return (ret < 0) ? ret : 0;
265} 265}
266 266
267static int ab8500_get_register(struct device *dev, u8 bank, 267static int ab8500_get_register(struct device *dev, u8 bank,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index e1450a56fc07..3c97f2c0fdfe 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -641,9 +641,9 @@ static const struct mfd_cell axp221_cells[] = {
641 641
642static const struct mfd_cell axp223_cells[] = { 642static const struct mfd_cell axp223_cells[] = {
643 { 643 {
644 .name = "axp221-pek", 644 .name = "axp221-pek",
645 .num_resources = ARRAY_SIZE(axp22x_pek_resources), 645 .num_resources = ARRAY_SIZE(axp22x_pek_resources),
646 .resources = axp22x_pek_resources, 646 .resources = axp22x_pek_resources,
647 }, { 647 }, {
648 .name = "axp22x-adc", 648 .name = "axp22x-adc",
649 .of_compatible = "x-powers,axp221-adc", 649 .of_compatible = "x-powers,axp221-adc",
@@ -651,7 +651,7 @@ static const struct mfd_cell axp223_cells[] = {
651 .name = "axp20x-battery-power-supply", 651 .name = "axp20x-battery-power-supply",
652 .of_compatible = "x-powers,axp221-battery-power-supply", 652 .of_compatible = "x-powers,axp221-battery-power-supply",
653 }, { 653 }, {
654 .name = "axp20x-regulator", 654 .name = "axp20x-regulator",
655 }, { 655 }, {
656 .name = "axp20x-ac-power-supply", 656 .name = "axp20x-ac-power-supply",
657 .of_compatible = "x-powers,axp221-ac-power-supply", 657 .of_compatible = "x-powers,axp221-ac-power-supply",
@@ -667,9 +667,9 @@ static const struct mfd_cell axp223_cells[] = {
667 667
668static const struct mfd_cell axp152_cells[] = { 668static const struct mfd_cell axp152_cells[] = {
669 { 669 {
670 .name = "axp20x-pek", 670 .name = "axp20x-pek",
671 .num_resources = ARRAY_SIZE(axp152_pek_resources), 671 .num_resources = ARRAY_SIZE(axp152_pek_resources),
672 .resources = axp152_pek_resources, 672 .resources = axp152_pek_resources,
673 }, 673 },
674}; 674};
675 675
@@ -698,87 +698,101 @@ static const struct resource axp288_charger_resources[] = {
698 698
699static const struct mfd_cell axp288_cells[] = { 699static const struct mfd_cell axp288_cells[] = {
700 { 700 {
701 .name = "axp288_adc", 701 .name = "axp288_adc",
702 .num_resources = ARRAY_SIZE(axp288_adc_resources), 702 .num_resources = ARRAY_SIZE(axp288_adc_resources),
703 .resources = axp288_adc_resources, 703 .resources = axp288_adc_resources,
704 }, 704 }, {
705 { 705 .name = "axp288_extcon",
706 .name = "axp288_extcon", 706 .num_resources = ARRAY_SIZE(axp288_extcon_resources),
707 .num_resources = ARRAY_SIZE(axp288_extcon_resources), 707 .resources = axp288_extcon_resources,
708 .resources = axp288_extcon_resources, 708 }, {
709 }, 709 .name = "axp288_charger",
710 { 710 .num_resources = ARRAY_SIZE(axp288_charger_resources),
711 .name = "axp288_charger", 711 .resources = axp288_charger_resources,
712 .num_resources = ARRAY_SIZE(axp288_charger_resources), 712 }, {
713 .resources = axp288_charger_resources, 713 .name = "axp288_fuel_gauge",
714 }, 714 .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
715 { 715 .resources = axp288_fuel_gauge_resources,
716 .name = "axp288_fuel_gauge", 716 }, {
717 .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), 717 .name = "axp221-pek",
718 .resources = axp288_fuel_gauge_resources, 718 .num_resources = ARRAY_SIZE(axp288_power_button_resources),
719 }, 719 .resources = axp288_power_button_resources,
720 { 720 }, {
721 .name = "axp221-pek", 721 .name = "axp288_pmic_acpi",
722 .num_resources = ARRAY_SIZE(axp288_power_button_resources),
723 .resources = axp288_power_button_resources,
724 },
725 {
726 .name = "axp288_pmic_acpi",
727 }, 722 },
728}; 723};
729 724
730static const struct mfd_cell axp803_cells[] = { 725static const struct mfd_cell axp803_cells[] = {
731 { 726 {
732 .name = "axp221-pek", 727 .name = "axp221-pek",
733 .num_resources = ARRAY_SIZE(axp803_pek_resources), 728 .num_resources = ARRAY_SIZE(axp803_pek_resources),
734 .resources = axp803_pek_resources, 729 .resources = axp803_pek_resources,
730 }, {
731 .name = "axp20x-gpio",
732 .of_compatible = "x-powers,axp813-gpio",
733 }, {
734 .name = "axp813-adc",
735 .of_compatible = "x-powers,axp813-adc",
736 }, {
737 .name = "axp20x-battery-power-supply",
738 .of_compatible = "x-powers,axp813-battery-power-supply",
739 }, {
740 .name = "axp20x-ac-power-supply",
741 .of_compatible = "x-powers,axp813-ac-power-supply",
742 .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
743 .resources = axp20x_ac_power_supply_resources,
735 }, 744 },
736 { .name = "axp20x-regulator" }, 745 { .name = "axp20x-regulator" },
737}; 746};
738 747
739static const struct mfd_cell axp806_self_working_cells[] = { 748static const struct mfd_cell axp806_self_working_cells[] = {
740 { 749 {
741 .name = "axp221-pek", 750 .name = "axp221-pek",
742 .num_resources = ARRAY_SIZE(axp806_pek_resources), 751 .num_resources = ARRAY_SIZE(axp806_pek_resources),
743 .resources = axp806_pek_resources, 752 .resources = axp806_pek_resources,
744 }, 753 },
745 { .name = "axp20x-regulator" }, 754 { .name = "axp20x-regulator" },
746}; 755};
747 756
748static const struct mfd_cell axp806_cells[] = { 757static const struct mfd_cell axp806_cells[] = {
749 { 758 {
750 .id = 2, 759 .id = 2,
751 .name = "axp20x-regulator", 760 .name = "axp20x-regulator",
752 }, 761 },
753}; 762};
754 763
755static const struct mfd_cell axp809_cells[] = { 764static const struct mfd_cell axp809_cells[] = {
756 { 765 {
757 .name = "axp221-pek", 766 .name = "axp221-pek",
758 .num_resources = ARRAY_SIZE(axp809_pek_resources), 767 .num_resources = ARRAY_SIZE(axp809_pek_resources),
759 .resources = axp809_pek_resources, 768 .resources = axp809_pek_resources,
760 }, { 769 }, {
761 .id = 1, 770 .id = 1,
762 .name = "axp20x-regulator", 771 .name = "axp20x-regulator",
763 }, 772 },
764}; 773};
765 774
766static const struct mfd_cell axp813_cells[] = { 775static const struct mfd_cell axp813_cells[] = {
767 { 776 {
768 .name = "axp221-pek", 777 .name = "axp221-pek",
769 .num_resources = ARRAY_SIZE(axp803_pek_resources), 778 .num_resources = ARRAY_SIZE(axp803_pek_resources),
770 .resources = axp803_pek_resources, 779 .resources = axp803_pek_resources,
771 }, { 780 }, {
772 .name = "axp20x-regulator", 781 .name = "axp20x-regulator",
773 }, { 782 }, {
774 .name = "axp20x-gpio", 783 .name = "axp20x-gpio",
775 .of_compatible = "x-powers,axp813-gpio", 784 .of_compatible = "x-powers,axp813-gpio",
776 }, { 785 }, {
777 .name = "axp813-adc", 786 .name = "axp813-adc",
778 .of_compatible = "x-powers,axp813-adc", 787 .of_compatible = "x-powers,axp813-adc",
779 }, { 788 }, {
780 .name = "axp20x-battery-power-supply", 789 .name = "axp20x-battery-power-supply",
781 .of_compatible = "x-powers,axp813-battery-power-supply", 790 .of_compatible = "x-powers,axp813-battery-power-supply",
791 }, {
792 .name = "axp20x-ac-power-supply",
793 .of_compatible = "x-powers,axp813-ac-power-supply",
794 .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
795 .resources = axp20x_ac_power_supply_resources,
782 }, 796 },
783}; 797};
784 798
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 503979c81dae..fab3cdc27ed6 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
59}; 59};
60 60
61static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = { 61static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
62 regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
62 regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), 63 regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
63 regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT), 64 regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
64 regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ), 65 regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index b99a194ce5a4..2d0fee488c5a 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
499 499
500 cros_ec_debugfs_remove(ec); 500 cros_ec_debugfs_remove(ec);
501 501
502 mfd_remove_devices(ec->dev);
502 cdev_del(&ec->cdev); 503 cdev_del(&ec->cdev);
503 device_unregister(&ec->class_dev); 504 device_unregister(&ec->class_dev);
504 return 0; 505 return 0;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5970b8def548..aec20e1c7d3d 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
2584 .irq_unmask = prcmu_irq_unmask, 2584 .irq_unmask = prcmu_irq_unmask,
2585}; 2585};
2586 2586
2587static __init char *fw_project_name(u32 project) 2587static char *fw_project_name(u32 project)
2588{ 2588{
2589 switch (project) { 2589 switch (project) {
2590 case PRCMU_FW_PROJECT_U8500: 2590 case PRCMU_FW_PROJECT_U8500:
@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
2732 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); 2732 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
2733} 2733}
2734 2734
2735static void __init init_prcm_registers(void) 2735static void init_prcm_registers(void)
2736{ 2736{
2737 u32 val; 2737 u32 val;
2738 2738
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index ca829f85672f..2713de989f05 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -82,11 +82,13 @@ static void exynos_lpass_enable(struct exynos_lpass *lpass)
82 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); 82 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S);
83 83
84 regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK, 84 regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK,
85 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); 85 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S |
86 LPASS_INTR_UART);
86 87
87 exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET); 88 exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET);
88 exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET); 89 exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET);
89 exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET); 90 exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET);
91 exynos_lpass_core_sw_reset(lpass, LPASS_UART_SW_RESET);
90} 92}
91 93
92static void exynos_lpass_disable(struct exynos_lpass *lpass) 94static void exynos_lpass_disable(struct exynos_lpass *lpass)
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 440030cecbbd..2a77988d0462 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -15,6 +15,7 @@
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/mfd/core.h> 16#include <linux/mfd/core.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/mutex.h>
18#include <linux/notifier.h> 19#include <linux/notifier.h>
19#include <linux/of.h> 20#include <linux/of.h>
20#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
@@ -155,7 +156,7 @@ static int madera_wait_for_boot(struct madera *madera)
155 usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2, 156 usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2,
156 MADERA_BOOT_POLL_INTERVAL_USEC); 157 MADERA_BOOT_POLL_INTERVAL_USEC);
157 regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val); 158 regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
158 }; 159 }
159 160
160 if (!(val & MADERA_BOOT_DONE_STS1)) { 161 if (!(val & MADERA_BOOT_DONE_STS1)) {
161 dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n"); 162 dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n");
@@ -357,6 +358,8 @@ int madera_dev_init(struct madera *madera)
357 358
358 dev_set_drvdata(madera->dev, madera); 359 dev_set_drvdata(madera->dev, madera);
359 BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier); 360 BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier);
361 mutex_init(&madera->dapm_ptr_lock);
362
360 madera_set_micbias_info(madera); 363 madera_set_micbias_info(madera);
361 364
362 /* 365 /*
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index d8217366ed36..d8ddd1a6f304 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -280,7 +280,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
280 280
281 for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) { 281 for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) {
282 sprintf(fps_name, "fps%d", fps_id); 282 sprintf(fps_name, "fps%d", fps_id);
283 if (!strcmp(fps_np->name, fps_name)) 283 if (of_node_name_eq(fps_np, fps_name))
284 break; 284 break;
285 } 285 }
286 286
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index f475e848252f..d0bf50e3568d 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
274 274
275 mc13xxx->adcflags |= MC13XXX_ADC_WORKING; 275 mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
276 276
277 mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); 277 ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
278 if (ret)
279 goto out;
278 280
279 adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | 281 adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
280 MC13XXX_ADC0_CHRGRAWDIV; 282 MC13XXX_ADC0_CHRGRAWDIV;
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 77b64bd64df3..ab24e176ef44 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
329 329
330 default: 330 default:
331 dev_err(&pdev->dev, "unsupported chip: %d\n", id); 331 dev_err(&pdev->dev, "unsupported chip: %d\n", id);
332 ret = -ENODEV; 332 return -ENODEV;
333 break;
334 } 333 }
335 334
336 if (ret) { 335 if (ret) {
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 52fafea06067..8d420c37b2a6 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
638 return -EFAULT; 638 return -EFAULT;
639 } 639 }
640 640
641 writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
642 writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
643 writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
644
641 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], 645 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
642 fw_version[1], 646 fw_version[1],
643 fw_version[2]); 647 fw_version[2]);
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 2a8369657e38..26c7b63e008a 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -109,7 +109,7 @@ struct rave_sp_reply {
109/** 109/**
110 * struct rave_sp_checksum - Variant specific checksum implementation details 110 * struct rave_sp_checksum - Variant specific checksum implementation details
111 * 111 *
112 * @length: Caculated checksum length 112 * @length: Calculated checksum length
113 * @subroutine: Utilized checksum algorithm implementation 113 * @subroutine: Utilized checksum algorithm implementation
114 */ 114 */
115struct rave_sp_checksum { 115struct rave_sp_checksum {
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 566caca4efd8..7569a4be0608 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1302,17 +1302,17 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
1302 pdata->autosleep = (pdata->autosleep_timeout) ? true : false; 1302 pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
1303 1303
1304 for_each_child_of_node(np, child) { 1304 for_each_child_of_node(np, child) {
1305 if (!strcmp(child->name, "stmpe_gpio")) { 1305 if (of_node_name_eq(child, "stmpe_gpio")) {
1306 pdata->blocks |= STMPE_BLOCK_GPIO; 1306 pdata->blocks |= STMPE_BLOCK_GPIO;
1307 } else if (!strcmp(child->name, "stmpe_keypad")) { 1307 } else if (of_node_name_eq(child, "stmpe_keypad")) {
1308 pdata->blocks |= STMPE_BLOCK_KEYPAD; 1308 pdata->blocks |= STMPE_BLOCK_KEYPAD;
1309 } else if (!strcmp(child->name, "stmpe_touchscreen")) { 1309 } else if (of_node_name_eq(child, "stmpe_touchscreen")) {
1310 pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN; 1310 pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
1311 } else if (!strcmp(child->name, "stmpe_adc")) { 1311 } else if (of_node_name_eq(child, "stmpe_adc")) {
1312 pdata->blocks |= STMPE_BLOCK_ADC; 1312 pdata->blocks |= STMPE_BLOCK_ADC;
1313 } else if (!strcmp(child->name, "stmpe_pwm")) { 1313 } else if (of_node_name_eq(child, "stmpe_pwm")) {
1314 pdata->blocks |= STMPE_BLOCK_PWM; 1314 pdata->blocks |= STMPE_BLOCK_PWM;
1315 } else if (!strcmp(child->name, "stmpe_rotator")) { 1315 } else if (of_node_name_eq(child, "stmpe_rotator")) {
1316 pdata->blocks |= STMPE_BLOCK_ROTATOR; 1316 pdata->blocks |= STMPE_BLOCK_ROTATOR;
1317 } 1317 }
1318 } 1318 }
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index c2d47d78705b..fd111296b959 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
264 cell->pdata_size = sizeof(tscadc); 264 cell->pdata_size = sizeof(tscadc);
265 } 265 }
266 266
267 err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells, 267 err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
268 tscadc->used_cells, NULL, 0, NULL); 268 tscadc->cells, tscadc->used_cells, NULL,
269 0, NULL);
269 if (err < 0) 270 if (err < 0)
270 goto err_disable_clk; 271 goto err_disable_clk;
271 272
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 910f569ff77c..8bcdecf494d0 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
235 235
236 mutex_init(&tps->tps_lock); 236 mutex_init(&tps->tps_lock);
237 237
238 ret = regmap_add_irq_chip(tps->regmap, tps->irq, 238 ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
239 IRQF_ONESHOT, 0, &tps65218_irq_chip, 239 IRQF_ONESHOT, 0, &tps65218_irq_chip,
240 &tps->irq_data); 240 &tps->irq_data);
241 if (ret < 0) 241 if (ret < 0)
242 return ret; 242 return ret;
243 243
@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
253 ARRAY_SIZE(tps65218_cells), NULL, 0, 253 ARRAY_SIZE(tps65218_cells), NULL, 0,
254 regmap_irq_get_domain(tps->irq_data)); 254 regmap_irq_get_domain(tps->irq_data));
255 255
256 if (ret < 0)
257 goto err_irq;
258
259 return 0;
260
261err_irq:
262 regmap_del_irq_chip(tps->irq, tps->irq_data);
263
264 return ret; 256 return ret;
265} 257}
266 258
267static int tps65218_remove(struct i2c_client *client)
268{
269 struct tps65218 *tps = i2c_get_clientdata(client);
270
271 regmap_del_irq_chip(tps->irq, tps->irq_data);
272
273 return 0;
274}
275
276static const struct i2c_device_id tps65218_id_table[] = { 259static const struct i2c_device_id tps65218_id_table[] = {
277 { "tps65218", TPS65218 }, 260 { "tps65218", TPS65218 },
278 { }, 261 { },
@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
285 .of_match_table = of_tps65218_match_table, 268 .of_match_table = of_tps65218_match_table,
286 }, 269 },
287 .probe = tps65218_probe, 270 .probe = tps65218_probe,
288 .remove = tps65218_remove,
289 .id_table = tps65218_id_table, 271 .id_table = tps65218_id_table,
290}; 272};
291 273
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b89379782741..9c7925ca13cf 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
592 return 0; 592 return 0;
593} 593}
594 594
595static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
596{
597 struct tps6586x *tps6586x = dev_get_drvdata(dev);
598
599 if (tps6586x->client->irq)
600 disable_irq(tps6586x->client->irq);
601
602 return 0;
603}
604
605static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
606{
607 struct tps6586x *tps6586x = dev_get_drvdata(dev);
608
609 if (tps6586x->client->irq)
610 enable_irq(tps6586x->client->irq);
611
612 return 0;
613}
614
615static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
616 tps6586x_i2c_resume);
617
595static const struct i2c_device_id tps6586x_id_table[] = { 618static const struct i2c_device_id tps6586x_id_table[] = {
596 { "tps6586x", 0 }, 619 { "tps6586x", 0 },
597 { }, 620 { },
@@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = {
602 .driver = { 625 .driver = {
603 .name = "tps6586x", 626 .name = "tps6586x",
604 .of_match_table = of_match_ptr(tps6586x_of_match), 627 .of_match_table = of_match_ptr(tps6586x_of_match),
628 .pm = &tps6586x_pm_ops,
605 }, 629 },
606 .probe = tps6586x_i2c_probe, 630 .probe = tps6586x_i2c_probe,
607 .remove = tps6586x_i2c_remove, 631 .remove = tps6586x_i2c_remove,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 4be3d239da9e..299016bc46d9 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
979 * letting it generate the right frequencies for USB, MADC, and 979 * letting it generate the right frequencies for USB, MADC, and
980 * other purposes. 980 * other purposes.
981 */ 981 */
982static inline int __init protect_pm_master(void) 982static inline int protect_pm_master(void)
983{ 983{
984 int e = 0; 984 int e = 0;
985 985
@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
988 return e; 988 return e;
989} 989}
990 990
991static inline int __init unprotect_pm_master(void) 991static inline int unprotect_pm_master(void)
992{ 992{
993 int e = 0; 993 int e = 0;
994 994
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 1ee68bd440fb..16c6e2accfaa 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
1618 { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ 1618 { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
1619 { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ 1619 { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
1620 { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ 1620 { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
1621 { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
1621 { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ 1622 { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
1622 { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ 1623 { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
1623 { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ 1624 { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
2869 case ARIZONA_ASRC_ENABLE: 2870 case ARIZONA_ASRC_ENABLE:
2870 case ARIZONA_ASRC_STATUS: 2871 case ARIZONA_ASRC_STATUS:
2871 case ARIZONA_ASRC_RATE1: 2872 case ARIZONA_ASRC_RATE1:
2873 case ARIZONA_ASRC_RATE2:
2872 case ARIZONA_ISRC_1_CTRL_1: 2874 case ARIZONA_ISRC_1_CTRL_1:
2873 case ARIZONA_ISRC_1_CTRL_2: 2875 case ARIZONA_ISRC_1_CTRL_2:
2874 case ARIZONA_ISRC_1_CTRL_3: 2876 case ARIZONA_ISRC_1_CTRL_3:
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index efe2fb72d54b..25265fd0fd6e 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -218,8 +218,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
218 if (get_order(size) >= MAX_ORDER) 218 if (get_order(size) >= MAX_ORDER)
219 return NULL; 219 return NULL;
220 220
221 return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, 221 return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
222 GFP_KERNEL); 222 GFP_KERNEL);
223} 223}
224 224
225void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, 225void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index b8aaa684c397..2ed23c99f59f 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
820 * 820 *
821 * Return: 821 * Return:
822 * 0 - Success 822 * 0 - Success
823 * Non-zero - Failure
823 */ 824 */
824static int ibmvmc_open(struct inode *inode, struct file *file) 825static int ibmvmc_open(struct inode *inode, struct file *file)
825{ 826{
826 struct ibmvmc_file_session *session; 827 struct ibmvmc_file_session *session;
827 int rc = 0;
828 828
829 pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, 829 pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
830 (unsigned long)inode, (unsigned long)file, 830 (unsigned long)inode, (unsigned long)file,
831 ibmvmc.state); 831 ibmvmc.state);
832 832
833 session = kzalloc(sizeof(*session), GFP_KERNEL); 833 session = kzalloc(sizeof(*session), GFP_KERNEL);
834 if (!session)
835 return -ENOMEM;
836
834 session->file = file; 837 session->file = file;
835 file->private_data = session; 838 file->private_data = session;
836 839
837 return rc; 840 return 0;
838} 841}
839 842
840/** 843/**
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1fc8ea0f519b..ca4c9cc218a2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head,
401 struct mei_cl_cb *cb, *next; 401 struct mei_cl_cb *cb, *next;
402 402
403 list_for_each_entry_safe(cb, next, head, list) { 403 list_for_each_entry_safe(cb, next, head, list) {
404 if (cl == cb->cl) 404 if (cl == cb->cl) {
405 list_del_init(&cb->list); 405 list_del_init(&cb->list);
406 if (cb->fop_type == MEI_FOP_READ)
407 mei_io_cb_free(cb);
408 }
406 } 409 }
407} 410}
408 411
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 78c26cebf5d4..8f7616557c97 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1187,9 +1187,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
1187 dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; 1187 dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;
1188 1188
1189 if (dma_setup_res->status) { 1189 if (dma_setup_res->status) {
1190 dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", 1190 u8 status = dma_setup_res->status;
1191 dma_setup_res->status, 1191
1192 mei_hbm_status_str(dma_setup_res->status)); 1192 if (status == MEI_HBMS_NOT_ALLOWED) {
1193 dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
1194 } else {
1195 dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
1196 status,
1197 mei_hbm_status_str(status));
1198 }
1193 dev->hbm_f_dr_supported = 0; 1199 dev->hbm_f_dr_supported = 0;
1194 mei_dmam_ring_free(dev); 1200 mei_dmam_ring_free(dev);
1195 } 1201 }
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index e4b10b2d1a08..bb1ee9834a02 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
127#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ 127#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
128#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ 128#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
129 129
130#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
131
130#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ 132#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
131 133
132#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ 134#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
@@ -137,6 +139,8 @@
137#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ 139#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
138#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ 140#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
139 141
142#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
143
140/* 144/*
141 * MEI HW Section 145 * MEI HW Section
142 */ 146 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 73ace2d59dea..3ab946ad3257 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
88 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, 88 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
89 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, 89 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
90 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, 90 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
91 {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)}, 91 {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
92 92
93 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, 93 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
94 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, 94 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
95 95
96 {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
97
96 {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, 98 {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
97 99
98 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, 100 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
@@ -103,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
103 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, 105 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
104 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, 106 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
105 107
108 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
109
106 /* required last entry */ 110 /* required last entry */
107 {0, } 111 {0, }
108}; 112};
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 6b212c8b78e7..744757f541be 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -47,7 +47,8 @@
47 * @dc: Virtio device control 47 * @dc: Virtio device control
48 * @vpdev: VOP device which is the parent for this virtio device 48 * @vpdev: VOP device which is the parent for this virtio device
49 * @vr: Buffer for accessing the VRING 49 * @vr: Buffer for accessing the VRING
50 * @used: Buffer for used 50 * @used_virt: Virtual address of used ring
51 * @used: DMA address of used ring
51 * @used_size: Size of the used buffer 52 * @used_size: Size of the used buffer
52 * @reset_done: Track whether VOP reset is complete 53 * @reset_done: Track whether VOP reset is complete
53 * @virtio_cookie: Cookie returned upon requesting a interrupt 54 * @virtio_cookie: Cookie returned upon requesting a interrupt
@@ -61,6 +62,7 @@ struct _vop_vdev {
61 struct mic_device_ctrl __iomem *dc; 62 struct mic_device_ctrl __iomem *dc;
62 struct vop_device *vpdev; 63 struct vop_device *vpdev;
63 void __iomem *vr[VOP_MAX_VRINGS]; 64 void __iomem *vr[VOP_MAX_VRINGS];
65 void *used_virt[VOP_MAX_VRINGS];
64 dma_addr_t used[VOP_MAX_VRINGS]; 66 dma_addr_t used[VOP_MAX_VRINGS];
65 int used_size[VOP_MAX_VRINGS]; 67 int used_size[VOP_MAX_VRINGS];
66 struct completion reset_done; 68 struct completion reset_done;
@@ -260,12 +262,12 @@ static bool vop_notify(struct virtqueue *vq)
260static void vop_del_vq(struct virtqueue *vq, int n) 262static void vop_del_vq(struct virtqueue *vq, int n)
261{ 263{
262 struct _vop_vdev *vdev = to_vopvdev(vq->vdev); 264 struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
263 struct vring *vr = (struct vring *)(vq + 1);
264 struct vop_device *vpdev = vdev->vpdev; 265 struct vop_device *vpdev = vdev->vpdev;
265 266
266 dma_unmap_single(&vpdev->dev, vdev->used[n], 267 dma_unmap_single(&vpdev->dev, vdev->used[n],
267 vdev->used_size[n], DMA_BIDIRECTIONAL); 268 vdev->used_size[n], DMA_BIDIRECTIONAL);
268 free_pages((unsigned long)vr->used, get_order(vdev->used_size[n])); 269 free_pages((unsigned long)vdev->used_virt[n],
270 get_order(vdev->used_size[n]));
269 vring_del_virtqueue(vq); 271 vring_del_virtqueue(vq);
270 vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); 272 vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
271 vdev->vr[n] = NULL; 273 vdev->vr[n] = NULL;
@@ -283,6 +285,26 @@ static void vop_del_vqs(struct virtio_device *dev)
283 vop_del_vq(vq, idx++); 285 vop_del_vq(vq, idx++);
284} 286}
285 287
288static struct virtqueue *vop_new_virtqueue(unsigned int index,
289 unsigned int num,
290 struct virtio_device *vdev,
291 bool context,
292 void *pages,
293 bool (*notify)(struct virtqueue *vq),
294 void (*callback)(struct virtqueue *vq),
295 const char *name,
296 void *used)
297{
298 bool weak_barriers = false;
299 struct vring vring;
300
301 vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
302 vring.used = used;
303
304 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
305 notify, callback, name);
306}
307
286/* 308/*
287 * This routine will assign vring's allocated in host/io memory. Code in 309 * This routine will assign vring's allocated in host/io memory. Code in
288 * virtio_ring.c however continues to access this io memory as if it were local 310 * virtio_ring.c however continues to access this io memory as if it were local
@@ -302,7 +324,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
302 struct _mic_vring_info __iomem *info; 324 struct _mic_vring_info __iomem *info;
303 void *used; 325 void *used;
304 int vr_size, _vr_size, err, magic; 326 int vr_size, _vr_size, err, magic;
305 struct vring *vr;
306 u8 type = ioread8(&vdev->desc->type); 327 u8 type = ioread8(&vdev->desc->type);
307 328
308 if (index >= ioread8(&vdev->desc->num_vq)) 329 if (index >= ioread8(&vdev->desc->num_vq))
@@ -322,17 +343,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
322 return ERR_PTR(-ENOMEM); 343 return ERR_PTR(-ENOMEM);
323 vdev->vr[index] = va; 344 vdev->vr[index] = va;
324 memset_io(va, 0x0, _vr_size); 345 memset_io(va, 0x0, _vr_size);
325 vq = vring_new_virtqueue( 346
326 index,
327 le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
328 dev,
329 false,
330 ctx,
331 (void __force *)va, vop_notify, callback, name);
332 if (!vq) {
333 err = -ENOMEM;
334 goto unmap;
335 }
336 info = va + _vr_size; 347 info = va + _vr_size;
337 magic = ioread32(&info->magic); 348 magic = ioread32(&info->magic);
338 349
@@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
341 goto unmap; 352 goto unmap;
342 } 353 }
343 354
344 /* Allocate and reassign used ring now */
345 vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + 355 vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
346 sizeof(struct vring_used_elem) * 356 sizeof(struct vring_used_elem) *
347 le16_to_cpu(config.num)); 357 le16_to_cpu(config.num));
348 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 358 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
349 get_order(vdev->used_size[index])); 359 get_order(vdev->used_size[index]));
360 vdev->used_virt[index] = used;
350 if (!used) { 361 if (!used) {
351 err = -ENOMEM; 362 err = -ENOMEM;
352 dev_err(_vop_dev(vdev), "%s %d err %d\n", 363 dev_err(_vop_dev(vdev), "%s %d err %d\n",
353 __func__, __LINE__, err); 364 __func__, __LINE__, err);
354 goto del_vq; 365 goto unmap;
355 } 366 }
367
368 vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
369 (void __force *)va, vop_notify, callback,
370 name, used);
371 if (!vq) {
372 err = -ENOMEM;
373 goto free_used;
374 }
375
356 vdev->used[index] = dma_map_single(&vpdev->dev, used, 376 vdev->used[index] = dma_map_single(&vpdev->dev, used,
357 vdev->used_size[index], 377 vdev->used_size[index],
358 DMA_BIDIRECTIONAL); 378 DMA_BIDIRECTIONAL);
@@ -360,26 +380,17 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
360 err = -ENOMEM; 380 err = -ENOMEM;
361 dev_err(_vop_dev(vdev), "%s %d err %d\n", 381 dev_err(_vop_dev(vdev), "%s %d err %d\n",
362 __func__, __LINE__, err); 382 __func__, __LINE__, err);
363 goto free_used; 383 goto del_vq;
364 } 384 }
365 writeq(vdev->used[index], &vqconfig->used_address); 385 writeq(vdev->used[index], &vqconfig->used_address);
366 /*
367 * To reassign the used ring here we are directly accessing
368 * struct vring_virtqueue which is a private data structure
369 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
370 * vring_new_virtqueue() would ensure that
371 * (&vq->vring == (struct vring *) (&vq->vq + 1));
372 */
373 vr = (struct vring *)(vq + 1);
374 vr->used = used;
375 386
376 vq->priv = vdev; 387 vq->priv = vdev;
377 return vq; 388 return vq;
389del_vq:
390 vring_del_virtqueue(vq);
378free_used: 391free_used:
379 free_pages((unsigned long)used, 392 free_pages((unsigned long)used,
380 get_order(vdev->used_size[index])); 393 get_order(vdev->used_size[index]));
381del_vq:
382 vring_del_virtqueue(vq);
383unmap: 394unmap:
384 vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); 395 vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
385 return ERR_PTR(err); 396 return ERR_PTR(err);
@@ -394,16 +405,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
394 struct _vop_vdev *vdev = to_vopvdev(dev); 405 struct _vop_vdev *vdev = to_vopvdev(dev);
395 struct vop_device *vpdev = vdev->vpdev; 406 struct vop_device *vpdev = vdev->vpdev;
396 struct mic_device_ctrl __iomem *dc = vdev->dc; 407 struct mic_device_ctrl __iomem *dc = vdev->dc;
397 int i, err, retry; 408 int i, err, retry, queue_idx = 0;
398 409
399 /* We must have this many virtqueues. */ 410 /* We must have this many virtqueues. */
400 if (nvqs > ioread8(&vdev->desc->num_vq)) 411 if (nvqs > ioread8(&vdev->desc->num_vq))
401 return -ENOENT; 412 return -ENOENT;
402 413
403 for (i = 0; i < nvqs; ++i) { 414 for (i = 0; i < nvqs; ++i) {
415 if (!names[i]) {
416 vqs[i] = NULL;
417 continue;
418 }
419
404 dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", 420 dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
405 __func__, i, names[i]); 421 __func__, i, names[i]);
406 vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], 422 vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
407 ctx ? ctx[i] : false); 423 ctx ? ctx[i] : false);
408 if (IS_ERR(vqs[i])) { 424 if (IS_ERR(vqs[i])) {
409 err = PTR_ERR(vqs[i]); 425 err = PTR_ERR(vqs[i]);
@@ -576,6 +592,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
576 int ret = -1; 592 int ret = -1;
577 593
578 if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { 594 if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
595 struct device *dev = get_device(&vdev->vdev.dev);
596
579 dev_dbg(&vpdev->dev, 597 dev_dbg(&vpdev->dev,
580 "%s %d config_change %d type %d vdev %p\n", 598 "%s %d config_change %d type %d vdev %p\n",
581 __func__, __LINE__, 599 __func__, __LINE__,
@@ -587,7 +605,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
587 iowrite8(-1, &dc->h2c_vdev_db); 605 iowrite8(-1, &dc->h2c_vdev_db);
588 if (status & VIRTIO_CONFIG_S_DRIVER_OK) 606 if (status & VIRTIO_CONFIG_S_DRIVER_OK)
589 wait_for_completion(&vdev->reset_done); 607 wait_for_completion(&vdev->reset_done);
590 put_device(&vdev->vdev.dev); 608 put_device(dev);
591 iowrite8(1, &dc->guest_ack); 609 iowrite8(1, &dc->guest_ack);
592 dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", 610 dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
593 __func__, __LINE__, ioread8(&dc->guest_ack)); 611 __func__, __LINE__, ioread8(&dc->guest_ack));
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 595ac065b401..95ff7c5a1dfb 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -70,8 +70,12 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context)
70 struct resource r; 70 struct resource r;
71 71
72 if (acpi_dev_resource_io(res, &r)) { 72 if (acpi_dev_resource_io(res, &r)) {
73#ifdef CONFIG_HAS_IOPORT_MAP
73 base = ioport_map(r.start, resource_size(&r)); 74 base = ioport_map(r.start, resource_size(&r));
74 return AE_OK; 75 return AE_OK;
76#else
77 return AE_ERROR;
78#endif
75 } else if (acpi_dev_resource_memory(res, &r)) { 79 } else if (acpi_dev_resource_memory(res, &r)) {
76 base = ioremap(r.start, resource_size(&r)); 80 base = ioremap(r.start, resource_size(&r));
77 return AE_OK; 81 return AE_OK;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index aef1185f383d..14f3fdb8c6bb 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2112,7 +2112,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
2112 if (waiting) 2112 if (waiting)
2113 wake_up(&mq->wait); 2113 wake_up(&mq->wait);
2114 else 2114 else
2115 kblockd_schedule_work(&mq->complete_work); 2115 queue_work(mq->card->complete_wq, &mq->complete_work);
2116 2116
2117 return; 2117 return;
2118 } 2118 }
@@ -2924,6 +2924,13 @@ static int mmc_blk_probe(struct mmc_card *card)
2924 2924
2925 mmc_fixup_device(card, mmc_blk_fixups); 2925 mmc_fixup_device(card, mmc_blk_fixups);
2926 2926
2927 card->complete_wq = alloc_workqueue("mmc_complete",
2928 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2929 if (unlikely(!card->complete_wq)) {
2930 pr_err("Failed to create mmc completion workqueue");
2931 return -ENOMEM;
2932 }
2933
2927 md = mmc_blk_alloc(card); 2934 md = mmc_blk_alloc(card);
2928 if (IS_ERR(md)) 2935 if (IS_ERR(md))
2929 return PTR_ERR(md); 2936 return PTR_ERR(md);
@@ -2987,6 +2994,7 @@ static void mmc_blk_remove(struct mmc_card *card)
2987 pm_runtime_put_noidle(&card->dev); 2994 pm_runtime_put_noidle(&card->dev);
2988 mmc_blk_remove_req(md); 2995 mmc_blk_remove_req(md);
2989 dev_set_drvdata(&card->dev, NULL); 2996 dev_set_drvdata(&card->dev, NULL);
2997 destroy_workqueue(card->complete_wq);
2990} 2998}
2991 2999
2992static int _mmc_blk_suspend(struct mmc_card *card) 3000static int _mmc_blk_suspend(struct mmc_card *card)
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index f57f5de54206..cf58ccaf22d5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -234,7 +234,7 @@ int mmc_of_parse(struct mmc_host *host)
234 if (device_property_read_bool(dev, "broken-cd")) 234 if (device_property_read_bool(dev, "broken-cd"))
235 host->caps |= MMC_CAP_NEEDS_POLL; 235 host->caps |= MMC_CAP_NEEDS_POLL;
236 236
237 ret = mmc_gpiod_request_cd(host, "cd", 0, true, 237 ret = mmc_gpiod_request_cd(host, "cd", 0, false,
238 cd_debounce_delay_ms * 1000, 238 cd_debounce_delay_ms * 1000,
239 &cd_gpio_invert); 239 &cd_gpio_invert);
240 if (!ret) 240 if (!ret)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e26b8145efb3..a44ec8bb5418 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -116,7 +116,7 @@ config MMC_RICOH_MMC
116 116
117config MMC_SDHCI_ACPI 117config MMC_SDHCI_ACPI
118 tristate "SDHCI support for ACPI enumerated SDHCI controllers" 118 tristate "SDHCI support for ACPI enumerated SDHCI controllers"
119 depends on MMC_SDHCI && ACPI 119 depends on MMC_SDHCI && ACPI && PCI
120 select IOSF_MBI if X86 120 select IOSF_MBI if X86
121 help 121 help
122 This selects support for ACPI enumerated SDHCI controllers, 122 This selects support for ACPI enumerated SDHCI controllers,
@@ -978,7 +978,7 @@ config MMC_SDHCI_OMAP
978 tristate "TI SDHCI Controller Support" 978 tristate "TI SDHCI Controller Support"
979 depends on MMC_SDHCI_PLTFM && OF 979 depends on MMC_SDHCI_PLTFM && OF
980 select THERMAL 980 select THERMAL
981 select TI_SOC_THERMAL 981 imply TI_SOC_THERMAL
982 help 982 help
983 This selects the Secure Digital Host Controller Interface (SDHCI) 983 This selects the Secure Digital Host Controller Interface (SDHCI)
984 support present in TI's DRA7 SOCs. The controller supports 984 support present in TI's DRA7 SOCs. The controller supports
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 50293529d6de..c9e7aa50bb0a 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1431,6 +1431,8 @@ static int bcm2835_probe(struct platform_device *pdev)
1431 1431
1432err: 1432err:
1433 dev_dbg(dev, "%s -> err %d\n", __func__, ret); 1433 dev_dbg(dev, "%s -> err %d\n", __func__, ret);
1434 if (host->dma_chan_rxtx)
1435 dma_release_channel(host->dma_chan_rxtx);
1434 mmc_free_host(mmc); 1436 mmc_free_host(mmc);
1435 1437
1436 return ret; 1438 return ret;
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index ed8f2254b66a..aa38b1a8017e 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -1,11 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright (C) 2018 Mellanox Technologies. 3 * Copyright (C) 2018 Mellanox Technologies.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */ 4 */
10 5
11#include <linux/bitfield.h> 6#include <linux/bitfield.h>
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c2690c1a50ff..2eba507790e4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -179,6 +179,8 @@ struct meson_host {
179 struct sd_emmc_desc *descs; 179 struct sd_emmc_desc *descs;
180 dma_addr_t descs_dma_addr; 180 dma_addr_t descs_dma_addr;
181 181
182 int irq;
183
182 bool vqmmc_enabled; 184 bool vqmmc_enabled;
183}; 185};
184 186
@@ -738,6 +740,11 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
738static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) 740static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
739{ 741{
740 struct meson_host *host = mmc_priv(mmc); 742 struct meson_host *host = mmc_priv(mmc);
743 int adj = 0;
744
745 /* enable signal resampling w/o delay */
746 adj = ADJUST_ADJ_EN;
747 writel(adj, host->regs + host->data->adjust);
741 748
742 return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); 749 return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
743} 750}
@@ -768,6 +775,9 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
768 if (!IS_ERR(mmc->supply.vmmc)) 775 if (!IS_ERR(mmc->supply.vmmc))
769 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 776 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
770 777
778 /* disable signal resampling */
779 writel(0, host->regs + host->data->adjust);
780
771 /* Reset rx phase */ 781 /* Reset rx phase */
772 clk_set_phase(host->rx_clk, 0); 782 clk_set_phase(host->rx_clk, 0);
773 783
@@ -1166,7 +1176,7 @@ static int meson_mmc_get_cd(struct mmc_host *mmc)
1166 1176
1167static void meson_mmc_cfg_init(struct meson_host *host) 1177static void meson_mmc_cfg_init(struct meson_host *host)
1168{ 1178{
1169 u32 cfg = 0, adj = 0; 1179 u32 cfg = 0;
1170 1180
1171 cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, 1181 cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
1172 ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); 1182 ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
@@ -1177,10 +1187,6 @@ static void meson_mmc_cfg_init(struct meson_host *host)
1177 cfg |= CFG_ERR_ABORT; 1187 cfg |= CFG_ERR_ABORT;
1178 1188
1179 writel(cfg, host->regs + SD_EMMC_CFG); 1189 writel(cfg, host->regs + SD_EMMC_CFG);
1180
1181 /* enable signal resampling w/o delay */
1182 adj = ADJUST_ADJ_EN;
1183 writel(adj, host->regs + host->data->adjust);
1184} 1190}
1185 1191
1186static int meson_mmc_card_busy(struct mmc_host *mmc) 1192static int meson_mmc_card_busy(struct mmc_host *mmc)
@@ -1231,7 +1237,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1231 struct resource *res; 1237 struct resource *res;
1232 struct meson_host *host; 1238 struct meson_host *host;
1233 struct mmc_host *mmc; 1239 struct mmc_host *mmc;
1234 int ret, irq; 1240 int ret;
1235 1241
1236 mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); 1242 mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
1237 if (!mmc) 1243 if (!mmc)
@@ -1276,8 +1282,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1276 goto free_host; 1282 goto free_host;
1277 } 1283 }
1278 1284
1279 irq = platform_get_irq(pdev, 0); 1285 host->irq = platform_get_irq(pdev, 0);
1280 if (irq <= 0) { 1286 if (host->irq <= 0) {
1281 dev_err(&pdev->dev, "failed to get interrupt resource.\n"); 1287 dev_err(&pdev->dev, "failed to get interrupt resource.\n");
1282 ret = -EINVAL; 1288 ret = -EINVAL;
1283 goto free_host; 1289 goto free_host;
@@ -1331,9 +1337,9 @@ static int meson_mmc_probe(struct platform_device *pdev)
1331 writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, 1337 writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
1332 host->regs + SD_EMMC_IRQ_EN); 1338 host->regs + SD_EMMC_IRQ_EN);
1333 1339
1334 ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, 1340 ret = request_threaded_irq(host->irq, meson_mmc_irq,
1335 meson_mmc_irq_thread, IRQF_SHARED, 1341 meson_mmc_irq_thread, IRQF_SHARED,
1336 NULL, host); 1342 dev_name(&pdev->dev), host);
1337 if (ret) 1343 if (ret)
1338 goto err_init_clk; 1344 goto err_init_clk;
1339 1345
@@ -1351,7 +1357,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1351 if (host->bounce_buf == NULL) { 1357 if (host->bounce_buf == NULL) {
1352 dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); 1358 dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
1353 ret = -ENOMEM; 1359 ret = -ENOMEM;
1354 goto err_init_clk; 1360 goto err_free_irq;
1355 } 1361 }
1356 1362
1357 host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 1363 host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
@@ -1370,6 +1376,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1370err_bounce_buf: 1376err_bounce_buf:
1371 dma_free_coherent(host->dev, host->bounce_buf_size, 1377 dma_free_coherent(host->dev, host->bounce_buf_size,
1372 host->bounce_buf, host->bounce_dma_addr); 1378 host->bounce_buf, host->bounce_dma_addr);
1379err_free_irq:
1380 free_irq(host->irq, host);
1373err_init_clk: 1381err_init_clk:
1374 clk_disable_unprepare(host->mmc_clk); 1382 clk_disable_unprepare(host->mmc_clk);
1375err_core_clk: 1383err_core_clk:
@@ -1387,6 +1395,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
1387 1395
1388 /* disable interrupts */ 1396 /* disable interrupts */
1389 writel(0, host->regs + SD_EMMC_IRQ_EN); 1397 writel(0, host->regs + SD_EMMC_IRQ_EN);
1398 free_irq(host->irq, host);
1390 1399
1391 dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 1400 dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
1392 host->descs, host->descs_dma_addr); 1401 host->descs, host->descs_dma_addr);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 8afeaf81ae66..833ef0590af8 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
846 846
847 if (timing == MMC_TIMING_MMC_HS400 && 847 if (timing == MMC_TIMING_MMC_HS400 &&
848 host->dev_comp->hs400_tune) 848 host->dev_comp->hs400_tune)
849 sdr_set_field(host->base + PAD_CMD_TUNE, 849 sdr_set_field(host->base + tune_reg,
850 MSDC_PAD_TUNE_CMDRRDLY, 850 MSDC_PAD_TUNE_CMDRRDLY,
851 host->hs400_cmd_int_delay); 851 host->hs400_cmd_int_delay);
852 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock, 852 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 0db99057c44f..9d12c06c7fd6 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
296 296
297 iproc_host->data = iproc_data; 297 iproc_host->data = iproc_data;
298 298
299 mmc_of_parse(host->mmc); 299 ret = mmc_of_parse(host->mmc);
300 if (ret)
301 goto err;
302
300 sdhci_get_property(pdev); 303 sdhci_get_property(pdev);
301 304
302 host->mmc->caps |= iproc_host->data->mmc_caps; 305 host->mmc->caps |= iproc_host->data->mmc_caps;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a22e11a65658..eba9bcc92ad3 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3763,8 +3763,9 @@ int sdhci_setup_host(struct sdhci_host *host)
3763 * Use zalloc to zero the reserved high 32-bits of 128-bit 3763 * Use zalloc to zero the reserved high 32-bits of 128-bit
3764 * descriptors so that they never need to be written. 3764 * descriptors so that they never need to be written.
3765 */ 3765 */
3766 buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + 3766 buf = dma_alloc_coherent(mmc_dev(mmc),
3767 host->adma_table_sz, &dma, GFP_KERNEL); 3767 host->align_buffer_sz + host->adma_table_sz,
3768 &dma, GFP_KERNEL);
3768 if (!buf) { 3769 if (!buf) {
3769 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3770 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3770 mmc_hostname(mmc)); 3771 mmc_hostname(mmc));
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 279e326e397e..70fadc976795 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1399,13 +1399,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
1399 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 1399 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1400 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; 1400 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
1401 1401
1402 if (host->cfg->clk_delays || host->use_new_timings) 1402 /*
1403 * Some H5 devices do not have signal traces precise enough to
1404 * use HS DDR mode for their eMMC chips.
1405 *
1406 * We still enable HS DDR modes for all the other controller
1407 * variants that support them.
1408 */
1409 if ((host->cfg->clk_delays || host->use_new_timings) &&
1410 !of_device_is_compatible(pdev->dev.of_node,
1411 "allwinner,sun50i-h5-emmc"))
1403 mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; 1412 mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
1404 1413
1405 ret = mmc_of_parse(mmc); 1414 ret = mmc_of_parse(mmc);
1406 if (ret) 1415 if (ret)
1407 goto error_free_dma; 1416 goto error_free_dma;
1408 1417
1418 /*
1419 * If we don't support delay chains in the SoC, we can't use any
1420 * of the higher speed modes. Mask them out in case the device
1421 * tree specifies the properties for them, which gets added to
1422 * the caps by mmc_of_parse() above.
1423 */
1424 if (!(host->cfg->clk_delays || host->use_new_timings)) {
1425 mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
1426 MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
1427 mmc->caps2 &= ~MMC_CAP2_HS200;
1428 }
1429
1430 /* TODO: This driver doesn't support HS400 mode yet */
1431 mmc->caps2 &= ~MMC_CAP2_HS400;
1432
1409 ret = sunxi_mmc_init_host(host); 1433 ret = sunxi_mmc_init_host(host);
1410 if (ret) 1434 if (ret)
1411 goto error_free_dma; 1435 goto error_free_dma;
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 22f753e555ac..83f88b8b5d9f 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev,
212 * Going to have to check what details I need to set and how to 212 * Going to have to check what details I need to set and how to
213 * get them 213 * get them
214 */ 214 */
215 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node); 215 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
216 mtd->type = MTD_NORFLASH; 216 mtd->type = MTD_NORFLASH;
217 mtd->flags = MTD_WRITEABLE; 217 mtd->flags = MTD_WRITEABLE;
218 mtd->size = size; 218 mtd->size = size;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 21e3cdc04036..3ef01baef9b6 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -507,6 +507,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
507{ 507{
508 struct nvmem_config config = {}; 508 struct nvmem_config config = {};
509 509
510 config.id = -1;
510 config.dev = &mtd->dev; 511 config.dev = &mtd->dev;
511 config.name = mtd->name; 512 config.name = mtd->name;
512 config.owner = THIS_MODULE; 513 config.owner = THIS_MODULE;
@@ -522,7 +523,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
522 mtd->nvmem = nvmem_register(&config); 523 mtd->nvmem = nvmem_register(&config);
523 if (IS_ERR(mtd->nvmem)) { 524 if (IS_ERR(mtd->nvmem)) {
524 /* Just ignore if there is no NVMEM support in the kernel */ 525 /* Just ignore if there is no NVMEM support in the kernel */
525 if (PTR_ERR(mtd->nvmem) == -ENOSYS) { 526 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
526 mtd->nvmem = NULL; 527 mtd->nvmem = NULL;
527 } else { 528 } else {
528 dev_err(&mtd->dev, "Failed to register NVMEM device\n"); 529 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 9887bda317cd..b31c868019ad 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -7,7 +7,7 @@
7extern struct mutex mtd_table_mutex; 7extern struct mutex mtd_table_mutex;
8 8
9struct mtd_info *__mtd_next_device(int i); 9struct mtd_info *__mtd_next_device(int i);
10int add_mtd_device(struct mtd_info *mtd); 10int __must_check add_mtd_device(struct mtd_info *mtd);
11int del_mtd_device(struct mtd_info *mtd); 11int del_mtd_device(struct mtd_info *mtd);
12int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); 12int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
13int del_mtd_partitions(struct mtd_info *); 13int del_mtd_partitions(struct mtd_info *);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index b6af41b04622..37f174ccbcec 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
480 /* let's register it anyway to preserve ordering */ 480 /* let's register it anyway to preserve ordering */
481 slave->offset = 0; 481 slave->offset = 0;
482 slave->mtd.size = 0; 482 slave->mtd.size = 0;
483
484 /* Initialize ->erasesize to make add_mtd_device() happy. */
485 slave->mtd.erasesize = parent->erasesize;
486
483 printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", 487 printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
484 part->name); 488 part->name);
485 goto out_register; 489 goto out_register;
@@ -618,10 +622,21 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
618 list_add(&new->list, &mtd_partitions); 622 list_add(&new->list, &mtd_partitions);
619 mutex_unlock(&mtd_partitions_mutex); 623 mutex_unlock(&mtd_partitions_mutex);
620 624
621 add_mtd_device(&new->mtd); 625 ret = add_mtd_device(&new->mtd);
626 if (ret)
627 goto err_remove_part;
622 628
623 mtd_add_partition_attrs(new); 629 mtd_add_partition_attrs(new);
624 630
631 return 0;
632
633err_remove_part:
634 mutex_lock(&mtd_partitions_mutex);
635 list_del(&new->list);
636 mutex_unlock(&mtd_partitions_mutex);
637
638 free_partition(new);
639
625 return ret; 640 return ret;
626} 641}
627EXPORT_SYMBOL_GPL(mtd_add_partition); 642EXPORT_SYMBOL_GPL(mtd_add_partition);
@@ -712,22 +727,31 @@ int add_mtd_partitions(struct mtd_info *master,
712{ 727{
713 struct mtd_part *slave; 728 struct mtd_part *slave;
714 uint64_t cur_offset = 0; 729 uint64_t cur_offset = 0;
715 int i; 730 int i, ret;
716 731
717 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); 732 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
718 733
719 for (i = 0; i < nbparts; i++) { 734 for (i = 0; i < nbparts; i++) {
720 slave = allocate_partition(master, parts + i, i, cur_offset); 735 slave = allocate_partition(master, parts + i, i, cur_offset);
721 if (IS_ERR(slave)) { 736 if (IS_ERR(slave)) {
722 del_mtd_partitions(master); 737 ret = PTR_ERR(slave);
723 return PTR_ERR(slave); 738 goto err_del_partitions;
724 } 739 }
725 740
726 mutex_lock(&mtd_partitions_mutex); 741 mutex_lock(&mtd_partitions_mutex);
727 list_add(&slave->list, &mtd_partitions); 742 list_add(&slave->list, &mtd_partitions);
728 mutex_unlock(&mtd_partitions_mutex); 743 mutex_unlock(&mtd_partitions_mutex);
729 744
730 add_mtd_device(&slave->mtd); 745 ret = add_mtd_device(&slave->mtd);
746 if (ret) {
747 mutex_lock(&mtd_partitions_mutex);
748 list_del(&slave->list);
749 mutex_unlock(&mtd_partitions_mutex);
750
751 free_partition(slave);
752 goto err_del_partitions;
753 }
754
731 mtd_add_partition_attrs(slave); 755 mtd_add_partition_attrs(slave);
732 /* Look for subpartitions */ 756 /* Look for subpartitions */
733 parse_mtd_partitions(&slave->mtd, parts[i].types, NULL); 757 parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
@@ -736,6 +760,11 @@ int add_mtd_partitions(struct mtd_info *master,
736 } 760 }
737 761
738 return 0; 762 return 0;
763
764err_del_partitions:
765 del_mtd_partitions(master);
766
767 return ret;
739} 768}
740 769
741static DEFINE_SPINLOCK(part_parser_lock); 770static DEFINE_SPINLOCK(part_parser_lock);
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index eebac35304c6..6e8edc9375dd 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1322,7 +1322,7 @@ int denali_init(struct denali_nand_info *denali)
1322 } 1322 }
1323 1323
1324 /* clk rate info is needed for setup_data_interface */ 1324 /* clk rate info is needed for setup_data_interface */
1325 if (denali->clk_rate && denali->clk_x_rate) 1325 if (!denali->clk_rate || !denali->clk_x_rate)
1326 chip->options |= NAND_KEEP_TIMINGS; 1326 chip->options |= NAND_KEEP_TIMINGS;
1327 1327
1328 chip->legacy.dummy_controller.ops = &denali_controller_ops; 1328 chip->legacy.dummy_controller.ops = &denali_controller_ops;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 325b4414dccc..c9149a37f8f0 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
593 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); 593 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
594} 594}
595 595
596/* fsmc_select_chip - assert or deassert nCE */
597static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert)
598{
599 u32 pc = readl(host->regs_va + FSMC_PC);
600
601 if (!assert)
602 writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
603 else
604 writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
605
606 /*
607 * nCE line changes must be applied before returning from this
608 * function.
609 */
610 mb();
611}
612
613/* 596/*
614 * fsmc_exec_op - hook called by the core to execute NAND operations 597 * fsmc_exec_op - hook called by the core to execute NAND operations
615 * 598 *
@@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
627 610
628 pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); 611 pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
629 612
630 fsmc_ce_ctrl(host, true);
631
632 for (op_id = 0; op_id < op->ninstrs; op_id++) { 613 for (op_id = 0; op_id < op->ninstrs; op_id++) {
633 instr = &op->instrs[op_id]; 614 instr = &op->instrs[op_id];
634 615
@@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
686 } 667 }
687 } 668 }
688 669
689 fsmc_ce_ctrl(host, false);
690
691 return ret; 670 return ret;
692} 671}
693 672
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index bd4cfac6b5aa..a4768df5083f 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
155 155
156 /* 156 /*
157 * Reset BCH here, too. We got failures otherwise :( 157 * Reset BCH here, too. We got failures otherwise :(
158 * See later BCH reset for explanation of MX23 handling 158 * See later BCH reset for explanation of MX23 and MX28 handling
159 */ 159 */
160 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); 160 ret = gpmi_reset_block(r->bch_regs,
161 GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
161 if (ret) 162 if (ret)
162 goto err_out; 163 goto err_out;
163 164
@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
263 /* 264 /*
264 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this 265 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
265 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. 266 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
266 * On the other hand, the MX28 needs the reset, because one case has been 267 * and MX28.
267 * seen where the BCH produced ECC errors constantly after 10000
268 * consecutive reboots. The latter case has not been seen on the MX23
269 * yet, still we don't know if it could happen there as well.
270 */ 268 */
271 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); 269 ret = gpmi_reset_block(r->bch_regs,
270 GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
272 if (ret) 271 if (ret)
273 goto err_out; 272 goto err_out;
274 273
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index f92ae5aa2a54..9526d5b23c80 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat,
260} 260}
261 261
262static int jz_nand_ioremap_resource(struct platform_device *pdev, 262static int jz_nand_ioremap_resource(struct platform_device *pdev,
263 const char *name, struct resource **res, void *__iomem *base) 263 const char *name, struct resource **res, void __iomem **base)
264{ 264{
265 int ret; 265 int ret;
266 266
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index cca4b24d2ffa..839494ac457c 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -410,6 +410,7 @@ static int nand_check_wp(struct nand_chip *chip)
410 410
411/** 411/**
412 * nand_fill_oob - [INTERN] Transfer client buffer to oob 412 * nand_fill_oob - [INTERN] Transfer client buffer to oob
413 * @chip: NAND chip object
413 * @oob: oob data buffer 414 * @oob: oob data buffer
414 * @len: oob data write length 415 * @len: oob data write length
415 * @ops: oob ops structure 416 * @ops: oob ops structure
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 1b722fe9213c..19a2b563acdf 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
158 158
159/** 159/**
160 * read_bbt - [GENERIC] Read the bad block table starting from page 160 * read_bbt - [GENERIC] Read the bad block table starting from page
161 * @chip: NAND chip object 161 * @this: NAND chip object
162 * @buf: temporary buffer 162 * @buf: temporary buffer
163 * @page: the starting page 163 * @page: the starting page
164 * @num: the number of bbt descriptors to read 164 * @num: the number of bbt descriptors to read
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 46c62a31fa46..920e7375084f 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2833,6 +2833,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
2833 if (ret) 2833 if (ret)
2834 return ret; 2834 return ret;
2835 2835
2836 if (nandc->props->is_bam) {
2837 free_bam_transaction(nandc);
2838 nandc->bam_txn = alloc_bam_transaction(nandc);
2839 if (!nandc->bam_txn) {
2840 dev_err(nandc->dev,
2841 "failed to allocate bam transaction\n");
2842 return -ENOMEM;
2843 }
2844 }
2845
2836 ret = mtd_device_register(mtd, NULL, 0); 2846 ret = mtd_device_register(mtd, NULL, 0);
2837 if (ret) 2847 if (ret)
2838 nand_cleanup(chip); 2848 nand_cleanup(chip);
@@ -2847,16 +2857,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2847 struct qcom_nand_host *host; 2857 struct qcom_nand_host *host;
2848 int ret; 2858 int ret;
2849 2859
2850 if (nandc->props->is_bam) {
2851 free_bam_transaction(nandc);
2852 nandc->bam_txn = alloc_bam_transaction(nandc);
2853 if (!nandc->bam_txn) {
2854 dev_err(nandc->dev,
2855 "failed to allocate bam transaction\n");
2856 return -ENOMEM;
2857 }
2858 }
2859
2860 for_each_available_child_of_node(dn, child) { 2860 for_each_available_child_of_node(dn, child) {
2861 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 2861 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2862 if (!host) { 2862 if (!host) {
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 479c2f2cf17f..fa87ae28cdfe 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
304 struct nand_device *nand = spinand_to_nand(spinand); 304 struct nand_device *nand = spinand_to_nand(spinand);
305 struct mtd_info *mtd = nanddev_to_mtd(nand); 305 struct mtd_info *mtd = nanddev_to_mtd(nand);
306 struct nand_page_io_req adjreq = *req; 306 struct nand_page_io_req adjreq = *req;
307 unsigned int nbytes = 0; 307 void *buf = spinand->databuf;
308 void *buf = NULL; 308 unsigned int nbytes;
309 u16 column = 0; 309 u16 column = 0;
310 int ret; 310 int ret;
311 311
312 memset(spinand->databuf, 0xff, 312 /*
313 nanddev_page_size(nand) + 313 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
314 nanddev_per_page_oobsize(nand)); 314 * the cache content to 0xFF (depends on vendor implementation), so we
315 * must fill the page cache entirely even if we only want to program
316 * the data portion of the page, otherwise we might corrupt the BBM or
317 * user data previously programmed in OOB area.
318 */
319 nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
320 memset(spinand->databuf, 0xff, nbytes);
321 adjreq.dataoffs = 0;
322 adjreq.datalen = nanddev_page_size(nand);
323 adjreq.databuf.out = spinand->databuf;
324 adjreq.ooblen = nanddev_per_page_oobsize(nand);
325 adjreq.ooboffs = 0;
326 adjreq.oobbuf.out = spinand->oobbuf;
315 327
316 if (req->datalen) { 328 if (req->datalen)
317 memcpy(spinand->databuf + req->dataoffs, req->databuf.out, 329 memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
318 req->datalen); 330 req->datalen);
319 adjreq.dataoffs = 0;
320 adjreq.datalen = nanddev_page_size(nand);
321 adjreq.databuf.out = spinand->databuf;
322 nbytes = adjreq.datalen;
323 buf = spinand->databuf;
324 }
325 331
326 if (req->ooblen) { 332 if (req->ooblen) {
327 if (req->mode == MTD_OPS_AUTO_OOB) 333 if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
332 else 338 else
333 memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, 339 memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
334 req->ooblen); 340 req->ooblen);
335
336 adjreq.ooblen = nanddev_per_page_oobsize(nand);
337 adjreq.ooboffs = 0;
338 nbytes += nanddev_per_page_oobsize(nand);
339 if (!buf) {
340 buf = spinand->oobbuf;
341 column = nanddev_page_size(nand);
342 }
343 } 341 }
344 342
345 spinand_cache_op_adjust_colum(spinand, &adjreq, &column); 343 spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
370 368
371 /* 369 /*
372 * We need to use the RANDOM LOAD CACHE operation if there's 370 * We need to use the RANDOM LOAD CACHE operation if there's
373 * more than one iteration, because the LOAD operation resets 371 * more than one iteration, because the LOAD operation might
374 * the cache to 0xff. 372 * reset the cache to 0xff.
375 */ 373 */
376 if (nbytes) { 374 if (nbytes) {
377 column = op.addr.val; 375 column = op.addr.val;
@@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand)
1018 for (i = 0; i < nand->memorg.ntargets; i++) { 1016 for (i = 0; i < nand->memorg.ntargets; i++) {
1019 ret = spinand_select_target(spinand, i); 1017 ret = spinand_select_target(spinand, i);
1020 if (ret) 1018 if (ret)
1021 goto err_free_bufs; 1019 goto err_manuf_cleanup;
1022 1020
1023 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); 1021 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1024 if (ret) 1022 if (ret)
1025 goto err_free_bufs; 1023 goto err_manuf_cleanup;
1026 } 1024 }
1027 1025
1028 ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); 1026 ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6371958dd170..21bf8ac78380 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -197,9 +197,9 @@ config VXLAN
197 197
198config GENEVE 198config GENEVE
199 tristate "Generic Network Virtualization Encapsulation" 199 tristate "Generic Network Virtualization Encapsulation"
200 depends on INET && NET_UDP_TUNNEL 200 depends on INET
201 depends on IPV6 || !IPV6 201 depends on IPV6 || !IPV6
202 select NET_IP_TUNNEL 202 select NET_UDP_TUNNEL
203 select GRO_CELLS 203 select GRO_CELLS
204 ---help--- 204 ---help---
205 This allows one to create geneve virtual interfaces that provide 205 This allows one to create geneve virtual interfaces that provide
@@ -519,7 +519,7 @@ config NET_FAILOVER
519 and destroy a failover master netdev and manages a primary and 519 and destroy a failover master netdev and manages a primary and
520 standby slave netdevs that get registered via the generic failover 520 standby slave netdevs that get registered via the generic failover
521 infrastructure. This can be used by paravirtual drivers to enable 521 infrastructure. This can be used by paravirtual drivers to enable
522 an alternate low latency datapath. It alsoenables live migration of 522 an alternate low latency datapath. It also enables live migration of
523 a VM with direct attached VF by failing over to the paravirtual 523 a VM with direct attached VF by failing over to the paravirtual
524 datapath when the VF is unplugged. 524 datapath when the VF is unplugged.
525 525
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a9d597f28023..537c90c8eb0a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1183 } 1183 }
1184 } 1184 }
1185 1185
1186 /* Link-local multicast packets should be passed to the 1186 /*
1187 * stack on the link they arrive as well as pass them to the 1187 * For packets determined by bond_should_deliver_exact_match() call to
1188 * bond-master device. These packets are mostly usable when 1188 * be suppressed we want to make an exception for link-local packets.
1189 * stack receives it with the link on which they arrive 1189 * This is necessary for e.g. LLDP daemons to be able to monitor
1190 * (e.g. LLDP) they also must be available on master. Some of 1190 * inactive slave links without being forced to bind to them
1191 * the use cases include (but are not limited to): LLDP agents 1191 * explicitly.
1192 * that must be able to operate both on enslaved interfaces as 1192 *
1193 * well as on bonds themselves; linux bridges that must be able 1193 * At the same time, packets that are passed to the bonding master
1194 * to process/pass BPDUs from attached bonds when any kind of 1194 * (including link-local ones) can have their originating interface
1195 * STP version is enabled on the network. 1195 * determined via PACKET_ORIGDEV socket option.
1196 */ 1196 */
1197 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { 1197 if (bond_should_deliver_exact_match(skb, slave, bond)) {
1198 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1198 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
1199 1199 return RX_HANDLER_PASS;
1200 if (nskb) {
1201 nskb->dev = bond->dev;
1202 nskb->queue_mapping = 0;
1203 netif_rx(nskb);
1204 }
1205 return RX_HANDLER_PASS;
1206 }
1207 if (bond_should_deliver_exact_match(skb, slave, bond))
1208 return RX_HANDLER_EXACT; 1200 return RX_HANDLER_EXACT;
1201 }
1209 1202
1210 skb->dev = bond->dev; 1203 skb->dev = bond->dev;
1211 1204
@@ -1963,6 +1956,9 @@ static int __bond_release_one(struct net_device *bond_dev,
1963 if (!bond_has_slaves(bond)) { 1956 if (!bond_has_slaves(bond)) {
1964 bond_set_carrier(bond); 1957 bond_set_carrier(bond);
1965 eth_hw_addr_random(bond_dev); 1958 eth_hw_addr_random(bond_dev);
1959 bond->nest_level = SINGLE_DEPTH_NESTING;
1960 } else {
1961 bond->nest_level = dev_get_nest_level(bond_dev) + 1;
1966 } 1962 }
1967 1963
1968 unblock_netpoll_tx(); 1964 unblock_netpoll_tx();
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index a0f954f36c09..44e6c7b1b222 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -257,10 +257,7 @@ static int handle_tx(struct ser_device *ser)
257 if (skb->len == 0) { 257 if (skb->len == 0) {
258 struct sk_buff *tmp = skb_dequeue(&ser->head); 258 struct sk_buff *tmp = skb_dequeue(&ser->head);
259 WARN_ON(tmp != skb); 259 WARN_ON(tmp != skb);
260 if (in_interrupt()) 260 dev_consume_skb_any(skb);
261 dev_kfree_skb_irq(skb);
262 else
263 kfree_skb(skb);
264 } 261 }
265 } 262 }
266 /* Send flow off if queue is empty */ 263 /* Send flow off if queue is empty */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3b3f88ffab53..c05e4d50d43d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
480struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) 480struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
481{ 481{
482 struct can_priv *priv = netdev_priv(dev); 482 struct can_priv *priv = netdev_priv(dev);
483 struct sk_buff *skb = priv->echo_skb[idx];
484 struct canfd_frame *cf;
485 483
486 if (idx >= priv->echo_skb_max) { 484 if (idx >= priv->echo_skb_max) {
487 netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", 485 netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
489 return NULL; 487 return NULL;
490 } 488 }
491 489
492 if (!skb) { 490 if (priv->echo_skb[idx]) {
493 netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", 491 /* Using "struct canfd_frame::len" for the frame
494 __func__, idx); 492 * length is supported on both CAN and CANFD frames.
495 return NULL; 493 */
496 } 494 struct sk_buff *skb = priv->echo_skb[idx];
495 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
496 u8 len = cf->len;
497 497
498 /* Using "struct canfd_frame::len" for the frame 498 *len_ptr = len;
499 * length is supported on both CAN and CANFD frames. 499 priv->echo_skb[idx] = NULL;
500 */
501 cf = (struct canfd_frame *)skb->data;
502 *len_ptr = cf->len;
503 priv->echo_skb[idx] = NULL;
504 500
505 return skb; 501 return skb;
502 }
503
504 return NULL;
506} 505}
507 506
508/* 507/*
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0f36eafe3ac1..1c66fb2ad76b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1106,7 +1106,7 @@ static int flexcan_chip_start(struct net_device *dev)
1106 } 1106 }
1107 } else { 1107 } else {
1108 /* clear and invalidate unused mailboxes first */ 1108 /* clear and invalidate unused mailboxes first */
1109 for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) { 1109 for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
1110 mb = flexcan_get_mb(priv, i); 1110 mb = flexcan_get_mb(priv, i);
1111 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, 1111 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
1112 &mb->can_ctrl); 1112 &mb->can_ctrl);
@@ -1432,7 +1432,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
1432 gpr_np = of_find_node_by_phandle(phandle); 1432 gpr_np = of_find_node_by_phandle(phandle);
1433 if (!gpr_np) { 1433 if (!gpr_np) {
1434 dev_dbg(&pdev->dev, "could not find gpr node by phandle\n"); 1434 dev_dbg(&pdev->dev, "could not find gpr node by phandle\n");
1435 return PTR_ERR(gpr_np); 1435 return -ENODEV;
1436 } 1436 }
1437 1437
1438 priv = netdev_priv(dev); 1438 priv = netdev_priv(dev);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0e4bbdcc614f..c76892ac4e69 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
344 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 344 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
345} 345}
346 346
347static void b53_enable_vlan(struct b53_device *dev, bool enable) 347static void b53_enable_vlan(struct b53_device *dev, bool enable,
348 bool enable_filtering)
348{ 349{
349 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 350 u8 mgmt, vc0, vc1, vc4 = 0, vc5;
350 351
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
369 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 370 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
370 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; 371 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
371 vc4 &= ~VC4_ING_VID_CHECK_MASK; 372 vc4 &= ~VC4_ING_VID_CHECK_MASK;
372 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 373 if (enable_filtering) {
373 vc5 |= VC5_DROP_VTABLE_MISS; 374 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
375 vc5 |= VC5_DROP_VTABLE_MISS;
376 } else {
377 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
378 vc5 &= ~VC5_DROP_VTABLE_MISS;
379 }
374 380
375 if (is5325(dev)) 381 if (is5325(dev))
376 vc0 &= ~VC0_RESERVED_1; 382 vc0 &= ~VC0_RESERVED_1;
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
420 } 426 }
421 427
422 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 428 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
429
430 dev->vlan_enabled = enable;
431 dev->vlan_filtering_enabled = enable_filtering;
423} 432}
424 433
425static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 434static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev)
632 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 641 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
633} 642}
634 643
644static u16 b53_default_pvid(struct b53_device *dev)
645{
646 if (is5325(dev) || is5365(dev))
647 return 1;
648 else
649 return 0;
650}
651
635int b53_configure_vlan(struct dsa_switch *ds) 652int b53_configure_vlan(struct dsa_switch *ds)
636{ 653{
637 struct b53_device *dev = ds->priv; 654 struct b53_device *dev = ds->priv;
638 struct b53_vlan vl = { 0 }; 655 struct b53_vlan vl = { 0 };
639 int i; 656 int i, def_vid;
657
658 def_vid = b53_default_pvid(dev);
640 659
641 /* clear all vlan entries */ 660 /* clear all vlan entries */
642 if (is5325(dev) || is5365(dev)) { 661 if (is5325(dev) || is5365(dev)) {
643 for (i = 1; i < dev->num_vlans; i++) 662 for (i = def_vid; i < dev->num_vlans; i++)
644 b53_set_vlan_entry(dev, i, &vl); 663 b53_set_vlan_entry(dev, i, &vl);
645 } else { 664 } else {
646 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 665 b53_do_vlan_op(dev, VTA_CMD_CLEAR);
647 } 666 }
648 667
649 b53_enable_vlan(dev, false); 668 b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
650 669
651 b53_for_each_port(dev, i) 670 b53_for_each_port(dev, i)
652 b53_write16(dev, B53_VLAN_PAGE, 671 b53_write16(dev, B53_VLAN_PAGE,
653 B53_VLAN_PORT_DEF_TAG(i), 1); 672 B53_VLAN_PORT_DEF_TAG(i), def_vid);
654 673
655 if (!is5325(dev) && !is5365(dev)) 674 if (!is5325(dev) && !is5365(dev))
656 b53_set_jumbo(dev, dev->enable_jumbo, false); 675 b53_set_jumbo(dev, dev->enable_jumbo, false);
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
1255 1274
1256int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) 1275int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
1257{ 1276{
1277 struct b53_device *dev = ds->priv;
1278 struct net_device *bridge_dev;
1279 unsigned int i;
1280 u16 pvid, new_pvid;
1281
1282 /* Handle the case were multiple bridges span the same switch device
1283 * and one of them has a different setting than what is being requested
1284 * which would be breaking filtering semantics for any of the other
1285 * bridge devices.
1286 */
1287 b53_for_each_port(dev, i) {
1288 bridge_dev = dsa_to_port(ds, i)->bridge_dev;
1289 if (bridge_dev &&
1290 bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
1291 br_vlan_enabled(bridge_dev) != vlan_filtering) {
1292 netdev_err(bridge_dev,
1293 "VLAN filtering is global to the switch!\n");
1294 return -EINVAL;
1295 }
1296 }
1297
1298 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1299 new_pvid = pvid;
1300 if (dev->vlan_filtering_enabled && !vlan_filtering) {
1301 /* Filtering is currently enabled, use the default PVID since
1302 * the bridge does not expect tagging anymore
1303 */
1304 dev->ports[port].pvid = pvid;
1305 new_pvid = b53_default_pvid(dev);
1306 } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
1307 /* Filtering is currently disabled, restore the previous PVID */
1308 new_pvid = dev->ports[port].pvid;
1309 }
1310
1311 if (pvid != new_pvid)
1312 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1313 new_pvid);
1314
1315 b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
1316
1258 return 0; 1317 return 0;
1259} 1318}
1260EXPORT_SYMBOL(b53_vlan_filtering); 1319EXPORT_SYMBOL(b53_vlan_filtering);
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
1270 if (vlan->vid_end > dev->num_vlans) 1329 if (vlan->vid_end > dev->num_vlans)
1271 return -ERANGE; 1330 return -ERANGE;
1272 1331
1273 b53_enable_vlan(dev, true); 1332 b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
1274 1333
1275 return 0; 1334 return 0;
1276} 1335}
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
1300 b53_fast_age_vlan(dev, vid); 1359 b53_fast_age_vlan(dev, vid);
1301 } 1360 }
1302 1361
1303 if (pvid) { 1362 if (pvid && !dsa_is_cpu_port(ds, port)) {
1304 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1363 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1305 vlan->vid_end); 1364 vlan->vid_end);
1306 b53_fast_age_vlan(dev, vid); 1365 b53_fast_age_vlan(dev, vid);
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
1326 1385
1327 vl->members &= ~BIT(port); 1386 vl->members &= ~BIT(port);
1328 1387
1329 if (pvid == vid) { 1388 if (pvid == vid)
1330 if (is5325(dev) || is5365(dev)) 1389 pvid = b53_default_pvid(dev);
1331 pvid = 1;
1332 else
1333 pvid = 0;
1334 }
1335 1390
1336 if (untagged && !dsa_is_cpu_port(ds, port)) 1391 if (untagged && !dsa_is_cpu_port(ds, port))
1337 vl->untag &= ~(BIT(port)); 1392 vl->untag &= ~(BIT(port));
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
1644 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 1699 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1645 dev->ports[port].vlan_ctl_mask = pvlan; 1700 dev->ports[port].vlan_ctl_mask = pvlan;
1646 1701
1647 if (is5325(dev) || is5365(dev)) 1702 pvid = b53_default_pvid(dev);
1648 pvid = 1;
1649 else
1650 pvid = 0;
1651 1703
1652 /* Make this port join all VLANs without VLAN entries */ 1704 /* Make this port join all VLANs without VLAN entries */
1653 if (is58xx(dev)) { 1705 if (is58xx(dev)) {
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index ec796482792d..4dc7ee38b258 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -91,6 +91,7 @@ enum {
91struct b53_port { 91struct b53_port {
92 u16 vlan_ctl_mask; 92 u16 vlan_ctl_mask;
93 struct ethtool_eee eee; 93 struct ethtool_eee eee;
94 u16 pvid;
94}; 95};
95 96
96struct b53_vlan { 97struct b53_vlan {
@@ -137,6 +138,8 @@ struct b53_device {
137 138
138 unsigned int num_vlans; 139 unsigned int num_vlans;
139 struct b53_vlan *vlans; 140 struct b53_vlan *vlans;
141 bool vlan_enabled;
142 bool vlan_filtering_enabled;
140 unsigned int num_ports; 143 unsigned int num_ports;
141 struct b53_port *ports; 144 struct b53_port *ports;
142}; 145};
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 90f514252987..d9c56a779c08 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev)
511 /* Clear all pending interrupts */ 511 /* Clear all pending interrupts */
512 writel(0xffffffff, priv->regs + B53_SRAB_INTR); 512 writel(0xffffffff, priv->regs + B53_SRAB_INTR);
513 513
514 if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
515 return;
516
517 for (i = 0; i < B53_N_PORTS; i++) { 514 for (i = 0; i < B53_N_PORTS; i++) {
518 port = &priv->port_intrs[i]; 515 port = &priv->port_intrs[i];
519 516
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 361fbde76654..14138d423cf1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -690,7 +690,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
690 * port, the other ones have already been disabled during 690 * port, the other ones have already been disabled during
691 * bcm_sf2_sw_setup 691 * bcm_sf2_sw_setup
692 */ 692 */
693 for (port = 0; port < DSA_MAX_PORTS; port++) { 693 for (port = 0; port < ds->num_ports; port++) {
694 if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) 694 if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
695 bcm_sf2_port_disable(ds, port, NULL); 695 bcm_sf2_port_disable(ds, port, NULL);
696 } 696 }
@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
726{ 726{
727 struct net_device *p = ds->ports[port].cpu_dp->master; 727 struct net_device *p = ds->ports[port].cpu_dp->master;
728 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 728 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
729 struct ethtool_wolinfo pwol; 729 struct ethtool_wolinfo pwol = { };
730 730
731 /* Get the parent device WoL settings */ 731 /* Get the parent device WoL settings */
732 p->ethtool_ops->get_wol(p, &pwol); 732 if (p->ethtool_ops->get_wol)
733 p->ethtool_ops->get_wol(p, &pwol);
733 734
734 /* Advertise the parent device supported settings */ 735 /* Advertise the parent device supported settings */
735 wol->supported = pwol.supported; 736 wol->supported = pwol.supported;
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
750 struct net_device *p = ds->ports[port].cpu_dp->master; 751 struct net_device *p = ds->ports[port].cpu_dp->master;
751 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 752 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
752 s8 cpu_port = ds->ports[port].cpu_dp->index; 753 s8 cpu_port = ds->ports[port].cpu_dp->index;
753 struct ethtool_wolinfo pwol; 754 struct ethtool_wolinfo pwol = { };
754 755
755 p->ethtool_ops->get_wol(p, &pwol); 756 if (p->ethtool_ops->get_wol)
757 p->ethtool_ops->get_wol(p, &pwol);
756 if (wol->wolopts & ~pwol.supported) 758 if (wol->wolopts & ~pwol.supported)
757 return -EINVAL; 759 return -EINVAL;
758 760
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 3b12e2dcff31..8a5111f9414c 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -7,7 +7,6 @@
7 7
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/gpio.h>
11#include <linux/gpio/consumer.h> 10#include <linux/gpio/consumer.h>
12#include <linux/kernel.h> 11#include <linux/kernel.h>
13#include <linux/module.h> 12#include <linux/module.h>
@@ -15,7 +14,6 @@
15#include <linux/phy.h> 14#include <linux/phy.h>
16#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
17#include <linux/if_bridge.h> 16#include <linux/if_bridge.h>
18#include <linux/of_gpio.h>
19#include <linux/of_net.h> 17#include <linux/of_net.h>
20#include <net/dsa.h> 18#include <net/dsa.h>
21#include <net/switchdev.h> 19#include <net/switchdev.h>
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 74547f43b938..a8a2c728afba 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -18,7 +18,6 @@
18#include <linux/mfd/syscon.h> 18#include <linux/mfd/syscon.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/of_gpio.h>
22#include <linux/of_mdio.h> 21#include <linux/of_mdio.h>
23#include <linux/of_net.h> 22#include <linux/of_net.h>
24#include <linux/of_platform.h> 23#include <linux/of_platform.h>
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8a517d8fb9d1..12fd7ce3f1ff 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
261 unsigned int sub_irq; 261 unsigned int sub_irq;
262 unsigned int n; 262 unsigned int n;
263 u16 reg; 263 u16 reg;
264 u16 ctl1;
264 int err; 265 int err;
265 266
266 mutex_lock(&chip->reg_lock); 267 mutex_lock(&chip->reg_lock);
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
270 if (err) 271 if (err)
271 goto out; 272 goto out;
272 273
273 for (n = 0; n < chip->g1_irq.nirqs; ++n) { 274 do {
274 if (reg & (1 << n)) { 275 for (n = 0; n < chip->g1_irq.nirqs; ++n) {
275 sub_irq = irq_find_mapping(chip->g1_irq.domain, n); 276 if (reg & (1 << n)) {
276 handle_nested_irq(sub_irq); 277 sub_irq = irq_find_mapping(chip->g1_irq.domain,
277 ++nhandled; 278 n);
279 handle_nested_irq(sub_irq);
280 ++nhandled;
281 }
278 } 282 }
279 } 283
284 mutex_lock(&chip->reg_lock);
285 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
286 if (err)
287 goto unlock;
288 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
289unlock:
290 mutex_unlock(&chip->reg_lock);
291 if (err)
292 goto out;
293 ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
294 } while (reg & ctl1);
295
280out: 296out:
281 return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); 297 return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
282} 298}
@@ -2403,6 +2419,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
2403 return mv88e6xxx_g1_stats_clear(chip); 2419 return mv88e6xxx_g1_stats_clear(chip);
2404} 2420}
2405 2421
2422/* The mv88e6390 has some hidden registers used for debug and
2423 * development. The errata also makes use of them.
2424 */
2425static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
2426 int reg, u16 val)
2427{
2428 u16 ctrl;
2429 int err;
2430
2431 err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
2432 PORT_RESERVED_1A, val);
2433 if (err)
2434 return err;
2435
2436 ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
2437 PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
2438 reg;
2439
2440 return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
2441 PORT_RESERVED_1A, ctrl);
2442}
2443
2444static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
2445{
2446 return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
2447 PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
2448}
2449
2450
2451static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
2452 int reg, u16 *val)
2453{
2454 u16 ctrl;
2455 int err;
2456
2457 ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
2458 PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
2459 reg;
2460
2461 err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
2462 PORT_RESERVED_1A, ctrl);
2463 if (err)
2464 return err;
2465
2466 err = mv88e6390_hidden_wait(chip);
2467 if (err)
2468 return err;
2469
2470 return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
2471 PORT_RESERVED_1A, val);
2472}
2473
2474/* Check if the errata has already been applied. */
2475static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
2476{
2477 int port;
2478 int err;
2479 u16 val;
2480
2481 for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2482 err = mv88e6390_hidden_read(chip, port, 0, &val);
2483 if (err) {
2484 dev_err(chip->dev,
2485 "Error reading hidden register: %d\n", err);
2486 return false;
2487 }
2488 if (val != 0x01c0)
2489 return false;
2490 }
2491
2492 return true;
2493}
2494
2495/* The 6390 copper ports have an errata which require poking magic
2496 * values into undocumented hidden registers and then performing a
2497 * software reset.
2498 */
2499static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
2500{
2501 int port;
2502 int err;
2503
2504 if (mv88e6390_setup_errata_applied(chip))
2505 return 0;
2506
2507 /* Set the ports into blocking mode */
2508 for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2509 err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
2510 if (err)
2511 return err;
2512 }
2513
2514 for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2515 err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
2516 if (err)
2517 return err;
2518 }
2519
2520 return mv88e6xxx_software_reset(chip);
2521}
2522
2406static int mv88e6xxx_setup(struct dsa_switch *ds) 2523static int mv88e6xxx_setup(struct dsa_switch *ds)
2407{ 2524{
2408 struct mv88e6xxx_chip *chip = ds->priv; 2525 struct mv88e6xxx_chip *chip = ds->priv;
@@ -2415,6 +2532,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2415 2532
2416 mutex_lock(&chip->reg_lock); 2533 mutex_lock(&chip->reg_lock);
2417 2534
2535 if (chip->info->ops->setup_errata) {
2536 err = chip->info->ops->setup_errata(chip);
2537 if (err)
2538 goto unlock;
2539 }
2540
2418 /* Cache the cmode of each port. */ 2541 /* Cache the cmode of each port. */
2419 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { 2542 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
2420 if (chip->info->ops->port_get_cmode) { 2543 if (chip->info->ops->port_get_cmode) {
@@ -3226,6 +3349,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
3226 3349
3227static const struct mv88e6xxx_ops mv88e6190_ops = { 3350static const struct mv88e6xxx_ops mv88e6190_ops = {
3228 /* MV88E6XXX_FAMILY_6390 */ 3351 /* MV88E6XXX_FAMILY_6390 */
3352 .setup_errata = mv88e6390_setup_errata,
3229 .irl_init_all = mv88e6390_g2_irl_init_all, 3353 .irl_init_all = mv88e6390_g2_irl_init_all,
3230 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3354 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3231 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3355 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3269,6 +3393,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
3269 3393
3270static const struct mv88e6xxx_ops mv88e6190x_ops = { 3394static const struct mv88e6xxx_ops mv88e6190x_ops = {
3271 /* MV88E6XXX_FAMILY_6390 */ 3395 /* MV88E6XXX_FAMILY_6390 */
3396 .setup_errata = mv88e6390_setup_errata,
3272 .irl_init_all = mv88e6390_g2_irl_init_all, 3397 .irl_init_all = mv88e6390_g2_irl_init_all,
3273 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3398 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3274 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3399 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3312,6 +3437,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
3312 3437
3313static const struct mv88e6xxx_ops mv88e6191_ops = { 3438static const struct mv88e6xxx_ops mv88e6191_ops = {
3314 /* MV88E6XXX_FAMILY_6390 */ 3439 /* MV88E6XXX_FAMILY_6390 */
3440 .setup_errata = mv88e6390_setup_errata,
3315 .irl_init_all = mv88e6390_g2_irl_init_all, 3441 .irl_init_all = mv88e6390_g2_irl_init_all,
3316 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3442 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3317 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3443 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3404,6 +3530,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
3404 3530
3405static const struct mv88e6xxx_ops mv88e6290_ops = { 3531static const struct mv88e6xxx_ops mv88e6290_ops = {
3406 /* MV88E6XXX_FAMILY_6390 */ 3532 /* MV88E6XXX_FAMILY_6390 */
3533 .setup_errata = mv88e6390_setup_errata,
3407 .irl_init_all = mv88e6390_g2_irl_init_all, 3534 .irl_init_all = mv88e6390_g2_irl_init_all,
3408 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3535 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3409 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3536 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3709,6 +3836,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
3709 3836
3710static const struct mv88e6xxx_ops mv88e6390_ops = { 3837static const struct mv88e6xxx_ops mv88e6390_ops = {
3711 /* MV88E6XXX_FAMILY_6390 */ 3838 /* MV88E6XXX_FAMILY_6390 */
3839 .setup_errata = mv88e6390_setup_errata,
3712 .irl_init_all = mv88e6390_g2_irl_init_all, 3840 .irl_init_all = mv88e6390_g2_irl_init_all,
3713 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3841 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3714 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3842 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3756,6 +3884,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
3756 3884
3757static const struct mv88e6xxx_ops mv88e6390x_ops = { 3885static const struct mv88e6xxx_ops mv88e6390x_ops = {
3758 /* MV88E6XXX_FAMILY_6390 */ 3886 /* MV88E6XXX_FAMILY_6390 */
3887 .setup_errata = mv88e6390_setup_errata,
3759 .irl_init_all = mv88e6390_g2_irl_init_all, 3888 .irl_init_all = mv88e6390_g2_irl_init_all,
3760 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3889 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3761 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3890 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index f9ecb7872d32..546651d8c3e1 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus {
300}; 300};
301 301
302struct mv88e6xxx_ops { 302struct mv88e6xxx_ops {
303 /* Switch Setup Errata, called early in the switch setup to
304 * allow any errata actions to be performed
305 */
306 int (*setup_errata)(struct mv88e6xxx_chip *chip);
307
303 int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); 308 int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
304 int (*ip_pri_map)(struct mv88e6xxx_chip *chip); 309 int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
305 310
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 5200e4bdce93..ea243840ee0f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
314{ 314{
315 struct mv88e6xxx_chip *chip = dev_id; 315 struct mv88e6xxx_chip *chip = dev_id;
316 struct mv88e6xxx_atu_entry entry; 316 struct mv88e6xxx_atu_entry entry;
317 int spid;
317 int err; 318 int err;
318 u16 val; 319 u16 val;
319 320
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
336 if (err) 337 if (err)
337 goto out; 338 goto out;
338 339
340 spid = entry.state;
341
339 if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { 342 if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
340 dev_err_ratelimited(chip->dev, 343 dev_err_ratelimited(chip->dev,
341 "ATU age out violation for %pM\n", 344 "ATU age out violation for %pM\n",
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
344 347
345 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { 348 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
346 dev_err_ratelimited(chip->dev, 349 dev_err_ratelimited(chip->dev,
347 "ATU member violation for %pM portvec %x\n", 350 "ATU member violation for %pM portvec %x spid %d\n",
348 entry.mac, entry.portvec); 351 entry.mac, entry.portvec, spid);
349 chip->ports[entry.portvec].atu_member_violation++; 352 chip->ports[spid].atu_member_violation++;
350 } 353 }
351 354
352 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { 355 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
353 dev_err_ratelimited(chip->dev, 356 dev_err_ratelimited(chip->dev,
354 "ATU miss violation for %pM portvec %x\n", 357 "ATU miss violation for %pM portvec %x spid %d\n",
355 entry.mac, entry.portvec); 358 entry.mac, entry.portvec, spid);
356 chip->ports[entry.portvec].atu_miss_violation++; 359 chip->ports[spid].atu_miss_violation++;
357 } 360 }
358 361
359 if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { 362 if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
360 dev_err_ratelimited(chip->dev, 363 dev_err_ratelimited(chip->dev,
361 "ATU full violation for %pM portvec %x\n", 364 "ATU full violation for %pM portvec %x spid %d\n",
362 entry.mac, entry.portvec); 365 entry.mac, entry.portvec, spid);
363 chip->ports[entry.portvec].atu_full_violation++; 366 chip->ports[spid].atu_full_violation++;
364 } 367 }
365 mutex_unlock(&chip->reg_lock); 368 mutex_unlock(&chip->reg_lock);
366 369
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 0d81866d0e4a..e583641de758 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -251,6 +251,16 @@
251/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ 251/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
252#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 252#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
253 253
254/* Offset 0x1a: Magic undocumented errata register */
255#define PORT_RESERVED_1A 0x1a
256#define PORT_RESERVED_1A_BUSY BIT(15)
257#define PORT_RESERVED_1A_WRITE BIT(14)
258#define PORT_RESERVED_1A_READ 0
259#define PORT_RESERVED_1A_PORT_SHIFT 5
260#define PORT_RESERVED_1A_BLOCK (0xf << 10)
261#define PORT_RESERVED_1A_CTRL_PORT 4
262#define PORT_RESERVED_1A_DATA_PORT 5
263
254int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, 264int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
255 u16 *val); 265 u16 *val);
256int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, 266int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2caa8c8b4b55..1bfc5ff8d81d 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -664,7 +664,7 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
664 if (port < 9) 664 if (port < 9)
665 return 0; 665 return 0;
666 666
667 return mv88e6390_serdes_irq_setup(chip, port); 667 return mv88e6390x_serdes_irq_setup(chip, port);
668} 668}
669 669
670void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) 670void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
index b4b839a1d095..ad41ec63cc9f 100644
--- a/drivers/net/dsa/realtek-smi.c
+++ b/drivers/net/dsa/realtek-smi.c
@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
347 struct device_node *mdio_np; 347 struct device_node *mdio_np;
348 int ret; 348 int ret;
349 349
350 mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, 350 mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
351 "realtek,smi-mdio");
352 if (!mdio_np) { 351 if (!mdio_np) {
353 dev_err(smi->dev, "no MDIO bus node\n"); 352 dev_err(smi->dev, "no MDIO bus node\n");
354 return -ENODEV; 353 return -ENODEV;
355 } 354 }
356 355
357 smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); 356 smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
358 if (!smi->slave_mii_bus) 357 if (!smi->slave_mii_bus) {
359 return -ENOMEM; 358 ret = -ENOMEM;
359 goto err_put_node;
360 }
360 smi->slave_mii_bus->priv = smi; 361 smi->slave_mii_bus->priv = smi;
361 smi->slave_mii_bus->name = "SMI slave MII"; 362 smi->slave_mii_bus->name = "SMI slave MII";
362 smi->slave_mii_bus->read = realtek_smi_mdio_read; 363 smi->slave_mii_bus->read = realtek_smi_mdio_read;
@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
371 if (ret) { 372 if (ret) {
372 dev_err(smi->dev, "unable to register MDIO bus %s\n", 373 dev_err(smi->dev, "unable to register MDIO bus %s\n",
373 smi->slave_mii_bus->id); 374 smi->slave_mii_bus->id);
374 of_node_put(mdio_np); 375 goto err_put_node;
375 } 376 }
376 377
377 return 0; 378 return 0;
379
380err_put_node:
381 of_node_put(mdio_np);
382
383 return ret;
378} 384}
379 385
380static int realtek_smi_probe(struct platform_device *pdev) 386static int realtek_smi_probe(struct platform_device *pdev)
@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
457 struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); 463 struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
458 464
459 dsa_unregister_switch(smi->ds); 465 dsa_unregister_switch(smi->ds);
466 if (smi->slave_mii_bus)
467 of_node_put(smi->slave_mii_bus->dev.of_node);
460 gpiod_set_value(smi->reset, 1); 468 gpiod_set_value(smi->reset, 1);
461 469
462 return 0; 470 return 0;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 91fc64c1145e..47e5984f16fb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1433,18 +1433,18 @@ static int greth_of_probe(struct platform_device *ofdev)
1433 } 1433 }
1434 1434
1435 /* Allocate TX descriptor ring in coherent memory */ 1435 /* Allocate TX descriptor ring in coherent memory */
1436 greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1436 greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1437 &greth->tx_bd_base_phys, 1437 &greth->tx_bd_base_phys,
1438 GFP_KERNEL); 1438 GFP_KERNEL);
1439 if (!greth->tx_bd_base) { 1439 if (!greth->tx_bd_base) {
1440 err = -ENOMEM; 1440 err = -ENOMEM;
1441 goto error3; 1441 goto error3;
1442 } 1442 }
1443 1443
1444 /* Allocate RX descriptor ring in coherent memory */ 1444 /* Allocate RX descriptor ring in coherent memory */
1445 greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1445 greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1446 &greth->rx_bd_base_phys, 1446 &greth->rx_bd_base_phys,
1447 GFP_KERNEL); 1447 GFP_KERNEL);
1448 if (!greth->rx_bd_base) { 1448 if (!greth->rx_bd_base) {
1449 err = -ENOMEM; 1449 err = -ENOMEM;
1450 goto error4; 1450 goto error4;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 0b60921c392f..16477aa6d61f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -795,8 +795,8 @@ static int slic_init_stat_queue(struct slic_device *sdev)
795 size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; 795 size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
796 796
797 for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { 797 for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
798 descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr, 798 descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
799 GFP_KERNEL); 799 GFP_KERNEL);
800 if (!descs) { 800 if (!descs) {
801 netdev_err(sdev->netdev, 801 netdev_err(sdev->netdev,
802 "failed to allocate status descriptors\n"); 802 "failed to allocate status descriptors\n");
@@ -1240,8 +1240,8 @@ static int slic_init_shmem(struct slic_device *sdev)
1240 struct slic_shmem_data *sm_data; 1240 struct slic_shmem_data *sm_data;
1241 dma_addr_t paddr; 1241 dma_addr_t paddr;
1242 1242
1243 sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), 1243 sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
1244 &paddr, GFP_KERNEL); 1244 &paddr, GFP_KERNEL);
1245 if (!sm_data) { 1245 if (!sm_data) {
1246 dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); 1246 dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
1247 return -ENOMEM; 1247 return -ENOMEM;
@@ -1621,8 +1621,8 @@ static int slic_read_eeprom(struct slic_device *sdev)
1621 int err = 0; 1621 int err = 0;
1622 u8 *mac[2]; 1622 u8 *mac[2];
1623 1623
1624 eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, 1624 eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
1625 &paddr, GFP_KERNEL); 1625 &paddr, GFP_KERNEL);
1626 if (!eeprom) 1626 if (!eeprom)
1627 return -ENOMEM; 1627 return -ENOMEM;
1628 1628
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 4f11f98347ed..1827ef1f6d55 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2059,7 +2059,7 @@ static inline void ace_tx_int(struct net_device *dev,
2059 if (skb) { 2059 if (skb) {
2060 dev->stats.tx_packets++; 2060 dev->stats.tx_packets++;
2061 dev->stats.tx_bytes += skb->len; 2061 dev->stats.tx_bytes += skb->len;
2062 dev_kfree_skb_irq(skb); 2062 dev_consume_skb_irq(skb);
2063 info->skb = NULL; 2063 info->skb = NULL;
2064 } 2064 }
2065 2065
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 0fb986ba3290..0ae723f75341 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
145 & 0xffff; 145 & 0xffff;
146 146
147 if (inuse) { /* Tx FIFO is not empty */ 147 if (inuse) { /* Tx FIFO is not empty */
148 ready = priv->tx_prod - priv->tx_cons - inuse - 1; 148 ready = max_t(int,
149 priv->tx_prod - priv->tx_cons - inuse - 1, 0);
149 } else { 150 } else {
150 /* Check for buffered last packet */ 151 /* Check for buffered last packet */
151 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); 152 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 02921d877c08..aa1d1f5339d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
714 714
715 phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, 715 phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
716 priv->phy_iface); 716 priv->phy_iface);
717 if (IS_ERR(phydev)) 717 if (IS_ERR(phydev)) {
718 netdev_err(dev, "Could not attach to PHY\n"); 718 netdev_err(dev, "Could not attach to PHY\n");
719 phydev = NULL;
720 }
719 721
720 } else { 722 } else {
721 int ret; 723 int ret;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 420cede41ca4..b17d435de09f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
111 struct ena_com_admin_sq *sq = &queue->sq; 111 struct ena_com_admin_sq *sq = &queue->sq;
112 u16 size = ADMIN_SQ_SIZE(queue->q_depth); 112 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
113 113
114 sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, 114 sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
115 GFP_KERNEL); 115 GFP_KERNEL);
116 116
117 if (!sq->entries) { 117 if (!sq->entries) {
118 pr_err("memory allocation failed"); 118 pr_err("memory allocation failed");
@@ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
133 struct ena_com_admin_cq *cq = &queue->cq; 133 struct ena_com_admin_cq *cq = &queue->cq;
134 u16 size = ADMIN_CQ_SIZE(queue->q_depth); 134 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
135 135
136 cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, 136 cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
137 GFP_KERNEL); 137 GFP_KERNEL);
138 138
139 if (!cq->entries) { 139 if (!cq->entries) {
140 pr_err("memory allocation failed"); 140 pr_err("memory allocation failed");
@@ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
156 156
157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
159 aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, 159 aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
160 GFP_KERNEL); 160 GFP_KERNEL);
161 161
162 if (!aenq->entries) { 162 if (!aenq->entries) {
163 pr_err("memory allocation failed"); 163 pr_err("memory allocation failed");
@@ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
344 dev_node = dev_to_node(ena_dev->dmadev); 344 dev_node = dev_to_node(ena_dev->dmadev);
345 set_dev_node(ena_dev->dmadev, ctx->numa_node); 345 set_dev_node(ena_dev->dmadev, ctx->numa_node);
346 io_sq->desc_addr.virt_addr = 346 io_sq->desc_addr.virt_addr =
347 dma_zalloc_coherent(ena_dev->dmadev, size, 347 dma_alloc_coherent(ena_dev->dmadev, size,
348 &io_sq->desc_addr.phys_addr, 348 &io_sq->desc_addr.phys_addr,
349 GFP_KERNEL); 349 GFP_KERNEL);
350 set_dev_node(ena_dev->dmadev, dev_node); 350 set_dev_node(ena_dev->dmadev, dev_node);
351 if (!io_sq->desc_addr.virt_addr) { 351 if (!io_sq->desc_addr.virt_addr) {
352 io_sq->desc_addr.virt_addr = 352 io_sq->desc_addr.virt_addr =
353 dma_zalloc_coherent(ena_dev->dmadev, size, 353 dma_alloc_coherent(ena_dev->dmadev, size,
354 &io_sq->desc_addr.phys_addr, 354 &io_sq->desc_addr.phys_addr,
355 GFP_KERNEL); 355 GFP_KERNEL);
356 } 356 }
357 357
358 if (!io_sq->desc_addr.virt_addr) { 358 if (!io_sq->desc_addr.virt_addr) {
@@ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
425 prev_node = dev_to_node(ena_dev->dmadev); 425 prev_node = dev_to_node(ena_dev->dmadev);
426 set_dev_node(ena_dev->dmadev, ctx->numa_node); 426 set_dev_node(ena_dev->dmadev, ctx->numa_node);
427 io_cq->cdesc_addr.virt_addr = 427 io_cq->cdesc_addr.virt_addr =
428 dma_zalloc_coherent(ena_dev->dmadev, size, 428 dma_alloc_coherent(ena_dev->dmadev, size,
429 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 429 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
430 set_dev_node(ena_dev->dmadev, prev_node); 430 set_dev_node(ena_dev->dmadev, prev_node);
431 if (!io_cq->cdesc_addr.virt_addr) { 431 if (!io_cq->cdesc_addr.virt_addr) {
432 io_cq->cdesc_addr.virt_addr = 432 io_cq->cdesc_addr.virt_addr =
433 dma_zalloc_coherent(ena_dev->dmadev, size, 433 dma_alloc_coherent(ena_dev->dmadev, size,
434 &io_cq->cdesc_addr.phys_addr, 434 &io_cq->cdesc_addr.phys_addr,
435 GFP_KERNEL); 435 GFP_KERNEL);
436 } 436 }
437 437
438 if (!io_cq->cdesc_addr.virt_addr) { 438 if (!io_cq->cdesc_addr.virt_addr) {
@@ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1026 struct ena_rss *rss = &ena_dev->rss; 1026 struct ena_rss *rss = &ena_dev->rss;
1027 1027
1028 rss->hash_key = 1028 rss->hash_key =
1029 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1029 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1030 &rss->hash_key_dma_addr, GFP_KERNEL); 1030 &rss->hash_key_dma_addr, GFP_KERNEL);
1031 1031
1032 if (unlikely(!rss->hash_key)) 1032 if (unlikely(!rss->hash_key))
1033 return -ENOMEM; 1033 return -ENOMEM;
@@ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1050 struct ena_rss *rss = &ena_dev->rss; 1050 struct ena_rss *rss = &ena_dev->rss;
1051 1051
1052 rss->hash_ctrl = 1052 rss->hash_ctrl =
1053 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1053 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1054 &rss->hash_ctrl_dma_addr, GFP_KERNEL); 1054 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1055 1055
1056 if (unlikely(!rss->hash_ctrl)) 1056 if (unlikely(!rss->hash_ctrl))
1057 return -ENOMEM; 1057 return -ENOMEM;
@@ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1094 sizeof(struct ena_admin_rss_ind_table_entry); 1094 sizeof(struct ena_admin_rss_ind_table_entry);
1095 1095
1096 rss->rss_ind_tbl = 1096 rss->rss_ind_tbl =
1097 dma_zalloc_coherent(ena_dev->dmadev, tbl_size, 1097 dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1098 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 1098 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1099 if (unlikely(!rss->rss_ind_tbl)) 1099 if (unlikely(!rss->rss_ind_tbl))
1100 goto mem_err1; 1100 goto mem_err1;
1101 1101
@@ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1649 1649
1650 spin_lock_init(&mmio_read->lock); 1650 spin_lock_init(&mmio_read->lock);
1651 mmio_read->read_resp = 1651 mmio_read->read_resp =
1652 dma_zalloc_coherent(ena_dev->dmadev, 1652 dma_alloc_coherent(ena_dev->dmadev,
1653 sizeof(*mmio_read->read_resp), 1653 sizeof(*mmio_read->read_resp),
1654 &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1654 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1655 if (unlikely(!mmio_read->read_resp)) 1655 if (unlikely(!mmio_read->read_resp))
1656 goto err; 1656 goto err;
1657 1657
@@ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2623 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2623 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2624 2624
2625 host_attr->host_info = 2625 host_attr->host_info =
2626 dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, 2626 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2627 &host_attr->host_info_dma_addr, GFP_KERNEL); 2627 &host_attr->host_info_dma_addr, GFP_KERNEL);
2628 if (unlikely(!host_attr->host_info)) 2628 if (unlikely(!host_attr->host_info))
2629 return -ENOMEM; 2629 return -ENOMEM;
2630 2630
@@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2641 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2641 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2642 2642
2643 host_attr->debug_area_virt_addr = 2643 host_attr->debug_area_virt_addr =
2644 dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, 2644 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2645 &host_attr->debug_area_dma_addr, GFP_KERNEL); 2645 &host_attr->debug_area_dma_addr,
2646 GFP_KERNEL);
2646 if (unlikely(!host_attr->debug_area_virt_addr)) { 2647 if (unlikely(!host_attr->debug_area_virt_addr)) {
2647 host_attr->debug_area_size = 0; 2648 host_attr->debug_area_size = 0;
2648 return -ENOMEM; 2649 return -ENOMEM;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a70bb1bb90e7..a6eacf2099c3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
2663 goto err_device_destroy; 2663 goto err_device_destroy;
2664 } 2664 }
2665 2665
2666 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2667 /* Make sure we don't have a race with AENQ Links state handler */
2668 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2669 netif_carrier_on(adapter->netdev);
2670
2671 rc = ena_enable_msix_and_set_admin_interrupts(adapter, 2666 rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2672 adapter->num_queues); 2667 adapter->num_queues);
2673 if (rc) { 2668 if (rc) {
@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
2684 } 2679 }
2685 2680
2686 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2681 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2682
2683 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2684 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2685 netif_carrier_on(adapter->netdev);
2686
2687 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 2687 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2688 dev_err(&pdev->dev, 2688 dev_err(&pdev->dev,
2689 "Device reset completed successfully, Driver info: %s\n", 2689 "Device reset completed successfully, Driver info: %s\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index dc8b6173d8d8..63870072cbbd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
45 45
46#define DRV_MODULE_VER_MAJOR 2 46#define DRV_MODULE_VER_MAJOR 2
47#define DRV_MODULE_VER_MINOR 0 47#define DRV_MODULE_VER_MINOR 0
48#define DRV_MODULE_VER_SUBMINOR 2 48#define DRV_MODULE_VER_SUBMINOR 3
49 49
50#define DRV_MODULE_NAME "ena" 50#define DRV_MODULE_NAME "ena"
51#ifndef DRV_MODULE_VERSION 51#ifndef DRV_MODULE_VERSION
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index a90080f12e67..e548c0ae2e00 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -666,7 +666,7 @@ static int amd8111e_tx(struct net_device *dev)
666 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], 666 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
667 lp->tx_skbuff[tx_index]->len, 667 lp->tx_skbuff[tx_index]->len,
668 PCI_DMA_TODEVICE); 668 PCI_DMA_TODEVICE);
669 dev_kfree_skb_irq (lp->tx_skbuff[tx_index]); 669 dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
670 lp->tx_skbuff[tx_index] = NULL; 670 lp->tx_skbuff[tx_index] = NULL;
671 lp->tx_dma_addr[tx_index] = 0; 671 lp->tx_dma_addr[tx_index] = 0;
672 } 672 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index d272dc6984ac..b40d4377cc71 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -431,8 +431,6 @@
431#define MAC_MDIOSCAR_PA_WIDTH 5 431#define MAC_MDIOSCAR_PA_WIDTH 5
432#define MAC_MDIOSCAR_RA_INDEX 0 432#define MAC_MDIOSCAR_RA_INDEX 0
433#define MAC_MDIOSCAR_RA_WIDTH 16 433#define MAC_MDIOSCAR_RA_WIDTH 16
434#define MAC_MDIOSCAR_REG_INDEX 0
435#define MAC_MDIOSCAR_REG_WIDTH 21
436#define MAC_MDIOSCCDR_BUSY_INDEX 22 434#define MAC_MDIOSCCDR_BUSY_INDEX 22
437#define MAC_MDIOSCCDR_BUSY_WIDTH 1 435#define MAC_MDIOSCCDR_BUSY_WIDTH 1
438#define MAC_MDIOSCCDR_CMD_INDEX 16 436#define MAC_MDIOSCCDR_CMD_INDEX 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1e929a1e4ca7..4666084eda16 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1284 } 1284 }
1285} 1285}
1286 1286
1287static unsigned int xgbe_create_mdio_sca(int port, int reg)
1288{
1289 unsigned int mdio_sca, da;
1290
1291 da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
1292
1293 mdio_sca = 0;
1294 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
1295 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
1296 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
1297
1298 return mdio_sca;
1299}
1300
1287static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, 1301static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1288 int reg, u16 val) 1302 int reg, u16 val)
1289{ 1303{
@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1291 1305
1292 reinit_completion(&pdata->mdio_complete); 1306 reinit_completion(&pdata->mdio_complete);
1293 1307
1294 mdio_sca = 0; 1308 mdio_sca = xgbe_create_mdio_sca(addr, reg);
1295 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1296 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1297 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1309 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1298 1310
1299 mdio_sccd = 0; 1311 mdio_sccd = 0;
@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1317 1329
1318 reinit_completion(&pdata->mdio_complete); 1330 reinit_completion(&pdata->mdio_complete);
1319 1331
1320 mdio_sca = 0; 1332 mdio_sca = xgbe_create_mdio_sca(addr, reg);
1321 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1322 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1323 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1333 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1324 1334
1325 mdio_sccd = 0; 1335 mdio_sccd = 0;
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 0f2ad50f3bd7..87b142a312e0 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -206,8 +206,8 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
206 } 206 }
207 207
208 /* Packet buffers should be 64B aligned */ 208 /* Packet buffers should be 64B aligned */
209 pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, 209 pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
210 GFP_ATOMIC); 210 GFP_ATOMIC);
211 if (unlikely(!pkt_buf)) { 211 if (unlikely(!pkt_buf)) {
212 dev_kfree_skb_any(skb); 212 dev_kfree_skb_any(skb);
213 return NETDEV_TX_OK; 213 return NETDEV_TX_OK;
@@ -428,8 +428,8 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
428 ring->ndev = ndev; 428 ring->ndev = ndev;
429 429
430 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; 430 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
431 ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, 431 ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
432 GFP_KERNEL); 432 GFP_KERNEL);
433 if (!ring->desc_addr) 433 if (!ring->desc_addr)
434 goto err; 434 goto err;
435 435
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 6a8e2567f2bd..4d3855ceb500 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -777,7 +777,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
777 777
778 if (bp->tx_bufs[bp->tx_empty]) { 778 if (bp->tx_bufs[bp->tx_empty]) {
779 ++dev->stats.tx_packets; 779 ++dev->stats.tx_packets;
780 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); 780 dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
781 } 781 }
782 bp->tx_bufs[bp->tx_empty] = NULL; 782 bp->tx_bufs[bp->tx_empty] = NULL;
783 bp->tx_fullup = 0; 783 bp->tx_fullup = 0;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c131cfc1b79d..e3538ba7d0e7 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx)
660 alx->num_txq + 660 alx->num_txq +
661 sizeof(struct alx_rrd) * alx->rx_ringsz + 661 sizeof(struct alx_rrd) * alx->rx_ringsz +
662 sizeof(struct alx_rfd) * alx->rx_ringsz; 662 sizeof(struct alx_rfd) * alx->rx_ringsz;
663 alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, 663 alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
664 alx->descmem.size, 664 alx->descmem.size,
665 &alx->descmem.dma, 665 &alx->descmem.dma, GFP_KERNEL);
666 GFP_KERNEL);
667 if (!alx->descmem.virt) 666 if (!alx->descmem.virt)
668 return -ENOMEM; 667 return -ENOMEM;
669 668
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 7087b88550db..3a3b35b5df67 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1019,8 +1019,8 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1019 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1019 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
1020 8 * 4; 1020 8 * 4;
1021 1021
1022 ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, 1022 ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
1023 &ring_header->dma, GFP_KERNEL); 1023 &ring_header->dma, GFP_KERNEL);
1024 if (unlikely(!ring_header->desc)) { 1024 if (unlikely(!ring_header->desc)) {
1025 dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); 1025 dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
1026 goto err_nomem; 1026 goto err_nomem;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bb41becb6609..31ff1e0d1baa 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1335{ 1335{
1336 struct net_device *netdev; 1336 struct net_device *netdev;
1337 struct atl2_adapter *adapter; 1337 struct atl2_adapter *adapter;
1338 static int cards_found; 1338 static int cards_found = 0;
1339 unsigned long mmio_start; 1339 unsigned long mmio_start;
1340 int mmio_len; 1340 int mmio_len;
1341 int err; 1341 int err;
1342 1342
1343 cards_found = 0;
1344
1345 err = pci_enable_device(pdev); 1343 err = pci_enable_device(pdev);
1346 if (err) 1344 if (err)
1347 return err; 1345 return err;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f44808959ff3..97ab0dd25552 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -638,7 +638,7 @@ static void b44_tx(struct b44 *bp)
638 bytes_compl += skb->len; 638 bytes_compl += skb->len;
639 pkts_compl++; 639 pkts_compl++;
640 640
641 dev_kfree_skb_irq(skb); 641 dev_consume_skb_irq(skb);
642 } 642 }
643 643
644 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); 644 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
@@ -1012,7 +1012,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
1012 } 1012 }
1013 1013
1014 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); 1014 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
1015 dev_kfree_skb_any(skb); 1015 dev_consume_skb_any(skb);
1016 skb = bounce_skb; 1016 skb = bounce_skb;
1017 } 1017 }
1018 1018
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 6bae973d4dce..09cd188826b1 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev)
936 936
937 /* allocate rx dma ring */ 937 /* allocate rx dma ring */
938 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 938 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
939 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 939 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
940 if (!p) { 940 if (!p) {
941 ret = -ENOMEM; 941 ret = -ENOMEM;
942 goto out_freeirq_tx; 942 goto out_freeirq_tx;
@@ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev)
947 947
948 /* allocate tx dma ring */ 948 /* allocate tx dma ring */
949 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 949 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
950 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 950 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
951 if (!p) { 951 if (!p) {
952 ret = -ENOMEM; 952 ret = -ENOMEM;
953 goto out_free_rx_ring; 953 goto out_free_rx_ring;
@@ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev)
2120 2120
2121 /* allocate rx dma ring */ 2121 /* allocate rx dma ring */
2122 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 2122 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2123 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 2123 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2124 if (!p) { 2124 if (!p) {
2125 dev_err(kdev, "cannot allocate rx ring %u\n", size); 2125 dev_err(kdev, "cannot allocate rx ring %u\n", size);
2126 ret = -ENOMEM; 2126 ret = -ENOMEM;
@@ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev)
2132 2132
2133 /* allocate tx dma ring */ 2133 /* allocate tx dma ring */
2134 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 2134 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2135 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 2135 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2136 if (!p) { 2136 if (!p) {
2137 dev_err(kdev, "cannot allocate tx ring\n"); 2137 dev_err(kdev, "cannot allocate tx ring\n");
2138 ret = -ENOMEM; 2138 ret = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4574275ef445..bc3ac369cbe3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev,
134 134
135 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); 135 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
136 reg = rxchk_readl(priv, RXCHK_CONTROL); 136 reg = rxchk_readl(priv, RXCHK_CONTROL);
137 /* Clear L2 header checks, which would prevent BPDUs
138 * from being received.
139 */
140 reg &= ~RXCHK_L2_HDR_DIS;
137 if (priv->rx_chk_en) 141 if (priv->rx_chk_en)
138 reg |= RXCHK_EN; 142 reg |= RXCHK_EN;
139 else 143 else
@@ -520,7 +524,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
520 struct ethtool_wolinfo *wol) 524 struct ethtool_wolinfo *wol)
521{ 525{
522 struct bcm_sysport_priv *priv = netdev_priv(dev); 526 struct bcm_sysport_priv *priv = netdev_priv(dev);
523 u32 reg;
524 527
525 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 528 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
526 wol->wolopts = priv->wolopts; 529 wol->wolopts = priv->wolopts;
@@ -528,11 +531,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
528 if (!(priv->wolopts & WAKE_MAGICSECURE)) 531 if (!(priv->wolopts & WAKE_MAGICSECURE))
529 return; 532 return;
530 533
531 /* Return the programmed SecureOn password */ 534 memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
532 reg = umac_readl(priv, UMAC_PSW_MS);
533 put_unaligned_be16(reg, &wol->sopass[0]);
534 reg = umac_readl(priv, UMAC_PSW_LS);
535 put_unaligned_be32(reg, &wol->sopass[2]);
536} 535}
537 536
538static int bcm_sysport_set_wol(struct net_device *dev, 537static int bcm_sysport_set_wol(struct net_device *dev,
@@ -548,13 +547,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
548 if (wol->wolopts & ~supported) 547 if (wol->wolopts & ~supported)
549 return -EINVAL; 548 return -EINVAL;
550 549
551 /* Program the SecureOn password */ 550 if (wol->wolopts & WAKE_MAGICSECURE)
552 if (wol->wolopts & WAKE_MAGICSECURE) { 551 memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
553 umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
554 UMAC_PSW_MS);
555 umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
556 UMAC_PSW_LS);
557 }
558 552
559 /* Flag the device and relevant IRQ as wakeup capable */ 553 /* Flag the device and relevant IRQ as wakeup capable */
560 if (wol->wolopts) { 554 if (wol->wolopts) {
@@ -1506,8 +1500,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1506 /* We just need one DMA descriptor which is DMA-able, since writing to 1500 /* We just need one DMA descriptor which is DMA-able, since writing to
1507 * the port will allocate a new descriptor in its internal linked-list 1501 * the port will allocate a new descriptor in its internal linked-list
1508 */ 1502 */
1509 p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, 1503 p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
1510 GFP_KERNEL); 1504 GFP_KERNEL);
1511 if (!p) { 1505 if (!p) {
1512 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); 1506 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
1513 return -ENOMEM; 1507 return -ENOMEM;
@@ -2649,13 +2643,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2649 unsigned int index, i = 0; 2643 unsigned int index, i = 0;
2650 u32 reg; 2644 u32 reg;
2651 2645
2652 /* Password has already been programmed */
2653 reg = umac_readl(priv, UMAC_MPD_CTRL); 2646 reg = umac_readl(priv, UMAC_MPD_CTRL);
2654 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) 2647 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
2655 reg |= MPD_EN; 2648 reg |= MPD_EN;
2656 reg &= ~PSW_EN; 2649 reg &= ~PSW_EN;
2657 if (priv->wolopts & WAKE_MAGICSECURE) 2650 if (priv->wolopts & WAKE_MAGICSECURE) {
2651 /* Program the SecureOn password */
2652 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
2653 UMAC_PSW_MS);
2654 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
2655 UMAC_PSW_LS);
2658 reg |= PSW_EN; 2656 reg |= PSW_EN;
2657 }
2659 umac_writel(priv, reg, UMAC_MPD_CTRL); 2658 umac_writel(priv, reg, UMAC_MPD_CTRL);
2660 2659
2661 if (priv->wolopts & WAKE_FILTER) { 2660 if (priv->wolopts & WAKE_FILTER) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 0887e6356649..0b192fea9c5d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -12,6 +12,7 @@
12#define __BCM_SYSPORT_H 12#define __BCM_SYSPORT_H
13 13
14#include <linux/bitmap.h> 14#include <linux/bitmap.h>
15#include <linux/ethtool.h>
15#include <linux/if_vlan.h> 16#include <linux/if_vlan.h>
16#include <linux/net_dim.h> 17#include <linux/net_dim.h>
17 18
@@ -778,6 +779,7 @@ struct bcm_sysport_priv {
778 unsigned int crc_fwd:1; 779 unsigned int crc_fwd:1;
779 u16 rev; 780 u16 rev;
780 u32 wolopts; 781 u32 wolopts;
782 u8 sopass[SOPASS_MAX];
781 unsigned int wol_irq_disabled:1; 783 unsigned int wol_irq_disabled:1;
782 784
783 /* MIB related fields */ 785 /* MIB related fields */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index cabc8e49ad24..2d3a44c40221 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -634,9 +634,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
634 634
635 /* Alloc ring of descriptors */ 635 /* Alloc ring of descriptors */
636 size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 636 size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
637 ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 637 ring->cpu_base = dma_alloc_coherent(dma_dev, size,
638 &ring->dma_base, 638 &ring->dma_base,
639 GFP_KERNEL); 639 GFP_KERNEL);
640 if (!ring->cpu_base) { 640 if (!ring->cpu_base) {
641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", 641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
642 ring->mmio_base); 642 ring->mmio_base);
@@ -659,9 +659,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
659 659
660 /* Alloc ring of descriptors */ 660 /* Alloc ring of descriptors */
661 size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 661 size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
662 ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 662 ring->cpu_base = dma_alloc_coherent(dma_dev, size,
663 &ring->dma_base, 663 &ring->dma_base,
664 GFP_KERNEL); 664 GFP_KERNEL);
665 if (!ring->cpu_base) { 665 if (!ring->cpu_base) {
666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", 666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
667 ring->mmio_base); 667 ring->mmio_base);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index bbb247116045..d63371d70bce 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -844,8 +844,8 @@ bnx2_alloc_stats_blk(struct net_device *dev)
844 BNX2_SBLK_MSIX_ALIGN_SIZE); 844 BNX2_SBLK_MSIX_ALIGN_SIZE);
845 bp->status_stats_size = status_blk_size + 845 bp->status_stats_size = status_blk_size +
846 sizeof(struct statistics_block); 846 sizeof(struct statistics_block);
847 status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, 847 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
848 &bp->status_blk_mapping, GFP_KERNEL); 848 &bp->status_blk_mapping, GFP_KERNEL);
849 if (!status_blk) 849 if (!status_blk)
850 return -ENOMEM; 850 return -ENOMEM;
851 851
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 5cd3135dfe30..03d131f777bc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2081,7 +2081,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2081 bool is_pf); 2081 bool is_pf);
2082 2082
2083#define BNX2X_ILT_ZALLOC(x, y, size) \ 2083#define BNX2X_ILT_ZALLOC(x, y, size) \
2084 x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) 2084 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
2085 2085
2086#define BNX2X_ILT_FREE(x, y, size) \ 2086#define BNX2X_ILT_FREE(x, y, size) \
2087 do { \ 2087 do { \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 142bc11b9fbb..2462e7aa0c5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -52,7 +52,7 @@ extern int bnx2x_num_queues;
52 52
53#define BNX2X_PCI_ALLOC(y, size) \ 53#define BNX2X_PCI_ALLOC(y, size) \
54({ \ 54({ \
55 void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 55 void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
56 if (x) \ 56 if (x) \
57 DP(NETIF_MSG_HW, \ 57 DP(NETIF_MSG_HW, \
58 "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ 58 "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3aa80da973d7..d95730c6e0f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3449,10 +3449,10 @@ alloc_ext_stats:
3449 goto alloc_tx_ext_stats; 3449 goto alloc_tx_ext_stats;
3450 3450
3451 bp->hw_rx_port_stats_ext = 3451 bp->hw_rx_port_stats_ext =
3452 dma_zalloc_coherent(&pdev->dev, 3452 dma_alloc_coherent(&pdev->dev,
3453 sizeof(struct rx_port_stats_ext), 3453 sizeof(struct rx_port_stats_ext),
3454 &bp->hw_rx_port_stats_ext_map, 3454 &bp->hw_rx_port_stats_ext_map,
3455 GFP_KERNEL); 3455 GFP_KERNEL);
3456 if (!bp->hw_rx_port_stats_ext) 3456 if (!bp->hw_rx_port_stats_ext)
3457 return 0; 3457 return 0;
3458 3458
@@ -3462,10 +3462,10 @@ alloc_tx_ext_stats:
3462 3462
3463 if (bp->hwrm_spec_code >= 0x10902) { 3463 if (bp->hwrm_spec_code >= 0x10902) {
3464 bp->hw_tx_port_stats_ext = 3464 bp->hw_tx_port_stats_ext =
3465 dma_zalloc_coherent(&pdev->dev, 3465 dma_alloc_coherent(&pdev->dev,
3466 sizeof(struct tx_port_stats_ext), 3466 sizeof(struct tx_port_stats_ext),
3467 &bp->hw_tx_port_stats_ext_map, 3467 &bp->hw_tx_port_stats_ext_map,
3468 GFP_KERNEL); 3468 GFP_KERNEL);
3469 } 3469 }
3470 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3470 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3471 } 3471 }
@@ -3903,7 +3903,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3903 if (len) 3903 if (len)
3904 break; 3904 break;
3905 /* on first few passes, just barely sleep */ 3905 /* on first few passes, just barely sleep */
3906 if (i < DFLT_HWRM_CMD_TIMEOUT) 3906 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3907 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 3907 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3908 HWRM_SHORT_MAX_TIMEOUT); 3908 HWRM_SHORT_MAX_TIMEOUT);
3909 else 3909 else
@@ -3926,7 +3926,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3926 dma_rmb(); 3926 dma_rmb();
3927 if (*valid) 3927 if (*valid)
3928 break; 3928 break;
3929 udelay(1); 3929 usleep_range(1, 5);
3930 } 3930 }
3931 3931
3932 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 3932 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
@@ -4973,12 +4973,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4974 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4974 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4975 u32 map_idx = ring->map_idx; 4975 u32 map_idx = ring->map_idx;
4976 unsigned int vector;
4976 4977
4978 vector = bp->irq_tbl[map_idx].vector;
4979 disable_irq_nosync(vector);
4977 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 4980 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
4978 if (rc) 4981 if (rc) {
4982 enable_irq(vector);
4979 goto err_out; 4983 goto err_out;
4984 }
4980 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 4985 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
4981 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4986 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4987 enable_irq(vector);
4982 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 4988 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4983 4989
4984 if (!i) { 4990 if (!i) {
@@ -5601,7 +5607,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5601 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 5607 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5602 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 5608 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
5603 if (bp->flags & BNXT_FLAG_CHIP_P5) 5609 if (bp->flags & BNXT_FLAG_CHIP_P5)
5604 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 5610 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
5611 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
5605 else 5612 else
5606 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 5613 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5607 } 5614 }
@@ -6221,9 +6228,12 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6221 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 6228 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6222 rmem->depth = 1; 6229 rmem->depth = 1;
6223 rmem->nr_pages = MAX_CTX_PAGES; 6230 rmem->nr_pages = MAX_CTX_PAGES;
6224 if (i == (nr_tbls - 1)) 6231 if (i == (nr_tbls - 1)) {
6225 rmem->nr_pages = ctx_pg->nr_pages % 6232 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6226 MAX_CTX_PAGES; 6233
6234 if (rem)
6235 rmem->nr_pages = rem;
6236 }
6227 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 6237 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6228 if (rc) 6238 if (rc)
6229 break; 6239 break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a451796deefe..2fb653e0048d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -582,7 +582,7 @@ struct nqe_cn {
582 (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ 582 (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
583 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) 583 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
584 584
585#define HWRM_VALID_BIT_DELAY_USEC 20 585#define HWRM_VALID_BIT_DELAY_USEC 150
586 586
587#define BNXT_HWRM_CHNL_CHIMP 0 587#define BNXT_HWRM_CHNL_CHIMP 0
588#define BNXT_HWRM_CHNL_KONG 1 588#define BNXT_HWRM_CHNL_KONG 1
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 15c7041e937b..70775158c8c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -316,8 +316,8 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
316 316
317 n = IEEE_8021QAZ_MAX_TCS; 317 n = IEEE_8021QAZ_MAX_TCS;
318 data_len = sizeof(*data) + sizeof(*fw_app) * n; 318 data_len = sizeof(*data) + sizeof(*fw_app) * n;
319 data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, 319 data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
320 GFP_KERNEL); 320 GFP_KERNEL);
321 if (!data) 321 if (!data)
322 return -ENOMEM; 322 return -ENOMEM;
323 323
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 140dbd62106d..7f56032e44ac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -85,8 +85,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
85 return -EFAULT; 85 return -EFAULT;
86 } 86 }
87 87
88 data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, 88 data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
89 &data_dma_addr, GFP_KERNEL); 89 &data_dma_addr, GFP_KERNEL);
90 if (!data_addr) 90 if (!data_addr)
91 return -ENOMEM; 91 return -ENOMEM;
92 92
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f1aaac8e6268..0a0995894ddb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -386,8 +386,8 @@ struct hwrm_err_output {
386#define HWRM_VERSION_MAJOR 1 386#define HWRM_VERSION_MAJOR 1
387#define HWRM_VERSION_MINOR 10 387#define HWRM_VERSION_MINOR 10
388#define HWRM_VERSION_UPDATE 0 388#define HWRM_VERSION_UPDATE 0
389#define HWRM_VERSION_RSVD 33 389#define HWRM_VERSION_RSVD 35
390#define HWRM_VERSION_STR "1.10.0.33" 390#define HWRM_VERSION_STR "1.10.0.35"
391 391
392/* hwrm_ver_get_input (size:192b/24B) */ 392/* hwrm_ver_get_input (size:192b/24B) */
393struct hwrm_ver_get_input { 393struct hwrm_ver_get_input {
@@ -1184,6 +1184,7 @@ struct hwrm_func_cfg_input {
1184 #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL 1184 #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
1185 #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL 1185 #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
1186 #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL 1186 #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
1187 #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
1187 __le32 enables; 1188 __le32 enables;
1188 #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL 1189 #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
1189 #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL 1190 #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 5db9f4158e62..134ae2862efa 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
1288 * for transmits, we just free buffers. 1288 * for transmits, we just free buffers.
1289 */ 1289 */
1290 1290
1291 dev_kfree_skb_irq(sb); 1291 dev_consume_skb_irq(sb);
1292 1292
1293 /* 1293 /*
1294 * .. and advance to the next buffer. 1294 * .. and advance to the next buffer.
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3b1397af81f7..b1627dd5f2fd 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8712,10 +8712,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
8712 if (!i && tg3_flag(tp, ENABLE_RSS)) 8712 if (!i && tg3_flag(tp, ENABLE_RSS))
8713 continue; 8713 continue;
8714 8714
8715 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, 8715 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8716 TG3_RX_RCB_RING_BYTES(tp), 8716 TG3_RX_RCB_RING_BYTES(tp),
8717 &tnapi->rx_rcb_mapping, 8717 &tnapi->rx_rcb_mapping,
8718 GFP_KERNEL); 8718 GFP_KERNEL);
8719 if (!tnapi->rx_rcb) 8719 if (!tnapi->rx_rcb)
8720 goto err_out; 8720 goto err_out;
8721 } 8721 }
@@ -8768,9 +8768,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8768{ 8768{
8769 int i; 8769 int i;
8770 8770
8771 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, 8771 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8772 sizeof(struct tg3_hw_stats), 8772 sizeof(struct tg3_hw_stats),
8773 &tp->stats_mapping, GFP_KERNEL); 8773 &tp->stats_mapping, GFP_KERNEL);
8774 if (!tp->hw_stats) 8774 if (!tp->hw_stats)
8775 goto err_out; 8775 goto err_out;
8776 8776
@@ -8778,10 +8778,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8778 struct tg3_napi *tnapi = &tp->napi[i]; 8778 struct tg3_napi *tnapi = &tp->napi[i];
8779 struct tg3_hw_status *sblk; 8779 struct tg3_hw_status *sblk;
8780 8780
8781 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, 8781 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8782 TG3_HW_STATUS_SIZE, 8782 TG3_HW_STATUS_SIZE,
8783 &tnapi->status_mapping, 8783 &tnapi->status_mapping,
8784 GFP_KERNEL); 8784 GFP_KERNEL);
8785 if (!tnapi->hw_status) 8785 if (!tnapi->hw_status)
8786 goto err_out; 8786 goto err_out;
8787 8787
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 3d45f4c92cf6..9bbaad9f3d63 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -643,6 +643,7 @@
643#define MACB_CAPS_JUMBO 0x00000020 643#define MACB_CAPS_JUMBO 0x00000020
644#define MACB_CAPS_GEM_HAS_PTP 0x00000040 644#define MACB_CAPS_GEM_HAS_PTP 0x00000040
645#define MACB_CAPS_BD_RD_PREFETCH 0x00000080 645#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
646#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
646#define MACB_CAPS_FIFO_MODE 0x10000000 647#define MACB_CAPS_FIFO_MODE 0x10000000
647#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 648#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
648#define MACB_CAPS_SG_DISABLED 0x40000000 649#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1214,6 +1215,8 @@ struct macb {
1214 1215
1215 int rx_bd_rd_prefetch; 1216 int rx_bd_rd_prefetch;
1216 int tx_bd_rd_prefetch; 1217 int tx_bd_rd_prefetch;
1218
1219 u32 rx_intr_mask;
1217}; 1220};
1218 1221
1219#ifdef CONFIG_MACB_USE_HWSTAMP 1222#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b126926ef7f5..2b2882615e8b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -56,8 +56,7 @@
56/* level of occupied TX descriptors under which we wake up TX process */ 56/* level of occupied TX descriptors under which we wake up TX process */
57#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) 57#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
58 58
59#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 59#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
60 | MACB_BIT(ISR_ROVR))
61#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 60#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
62 | MACB_BIT(ISR_RLE) \ 61 | MACB_BIT(ISR_RLE) \
63 | MACB_BIT(TXERR)) 62 | MACB_BIT(TXERR))
@@ -1270,7 +1269,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
1270 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1269 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1271 napi_reschedule(napi); 1270 napi_reschedule(napi);
1272 } else { 1271 } else {
1273 queue_writel(queue, IER, MACB_RX_INT_FLAGS); 1272 queue_writel(queue, IER, bp->rx_intr_mask);
1274 } 1273 }
1275 } 1274 }
1276 1275
@@ -1288,7 +1287,7 @@ static void macb_hresp_error_task(unsigned long data)
1288 u32 ctrl; 1287 u32 ctrl;
1289 1288
1290 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1289 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1291 queue_writel(queue, IDR, MACB_RX_INT_FLAGS | 1290 queue_writel(queue, IDR, bp->rx_intr_mask |
1292 MACB_TX_INT_FLAGS | 1291 MACB_TX_INT_FLAGS |
1293 MACB_BIT(HRESP)); 1292 MACB_BIT(HRESP));
1294 } 1293 }
@@ -1318,7 +1317,7 @@ static void macb_hresp_error_task(unsigned long data)
1318 1317
1319 /* Enable interrupts */ 1318 /* Enable interrupts */
1320 queue_writel(queue, IER, 1319 queue_writel(queue, IER,
1321 MACB_RX_INT_FLAGS | 1320 bp->rx_intr_mask |
1322 MACB_TX_INT_FLAGS | 1321 MACB_TX_INT_FLAGS |
1323 MACB_BIT(HRESP)); 1322 MACB_BIT(HRESP));
1324 } 1323 }
@@ -1372,14 +1371,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1372 (unsigned int)(queue - bp->queues), 1371 (unsigned int)(queue - bp->queues),
1373 (unsigned long)status); 1372 (unsigned long)status);
1374 1373
1375 if (status & MACB_RX_INT_FLAGS) { 1374 if (status & bp->rx_intr_mask) {
1376 /* There's no point taking any more interrupts 1375 /* There's no point taking any more interrupts
1377 * until we have processed the buffers. The 1376 * until we have processed the buffers. The
1378 * scheduling call may fail if the poll routine 1377 * scheduling call may fail if the poll routine
1379 * is already scheduled, so disable interrupts 1378 * is already scheduled, so disable interrupts
1380 * now. 1379 * now.
1381 */ 1380 */
1382 queue_writel(queue, IDR, MACB_RX_INT_FLAGS); 1381 queue_writel(queue, IDR, bp->rx_intr_mask);
1383 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1382 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1384 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1383 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1385 1384
@@ -1412,8 +1411,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1412 /* There is a hardware issue under heavy load where DMA can 1411 /* There is a hardware issue under heavy load where DMA can
1413 * stop, this causes endless "used buffer descriptor read" 1412 * stop, this causes endless "used buffer descriptor read"
1414 * interrupts but it can be cleared by re-enabling RX. See 1413 * interrupts but it can be cleared by re-enabling RX. See
1415 * the at91 manual, section 41.3.1 or the Zynq manual 1414 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
1416 * section 16.7.4 for details. 1415 * section 16.7.4 for details. RXUBR is only enabled for
1416 * these two versions.
1417 */ 1417 */
1418 if (status & MACB_BIT(RXUBR)) { 1418 if (status & MACB_BIT(RXUBR)) {
1419 ctrl = macb_readl(bp, NCR); 1419 ctrl = macb_readl(bp, NCR);
@@ -1738,12 +1738,8 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
1738 *skb = nskb; 1738 *skb = nskb;
1739 } 1739 }
1740 1740
1741 if (padlen) { 1741 if (padlen > ETH_FCS_LEN)
1742 if (padlen >= ETH_FCS_LEN) 1742 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
1743 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
1744 else
1745 skb_trim(*skb, ETH_FCS_LEN - padlen);
1746 }
1747 1743
1748add_fcs: 1744add_fcs:
1749 /* set FCS to packet */ 1745 /* set FCS to packet */
@@ -2263,7 +2259,7 @@ static void macb_init_hw(struct macb *bp)
2263 2259
2264 /* Enable interrupts */ 2260 /* Enable interrupts */
2265 queue_writel(queue, IER, 2261 queue_writel(queue, IER,
2266 MACB_RX_INT_FLAGS | 2262 bp->rx_intr_mask |
2267 MACB_TX_INT_FLAGS | 2263 MACB_TX_INT_FLAGS |
2268 MACB_BIT(HRESP)); 2264 MACB_BIT(HRESP));
2269 } 2265 }
@@ -3911,6 +3907,7 @@ static const struct macb_config sama5d4_config = {
3911}; 3907};
3912 3908
3913static const struct macb_config emac_config = { 3909static const struct macb_config emac_config = {
3910 .caps = MACB_CAPS_NEEDS_RSTONUBR,
3914 .clk_init = at91ether_clk_init, 3911 .clk_init = at91ether_clk_init,
3915 .init = at91ether_init, 3912 .init = at91ether_init,
3916}; 3913};
@@ -3932,7 +3929,8 @@ static const struct macb_config zynqmp_config = {
3932}; 3929};
3933 3930
3934static const struct macb_config zynq_config = { 3931static const struct macb_config zynq_config = {
3935 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, 3932 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
3933 MACB_CAPS_NEEDS_RSTONUBR,
3936 .dma_burst_length = 16, 3934 .dma_burst_length = 16,
3937 .clk_init = macb_clk_init, 3935 .clk_init = macb_clk_init,
3938 .init = macb_init, 3936 .init = macb_init,
@@ -4087,6 +4085,10 @@ static int macb_probe(struct platform_device *pdev)
4087 macb_dma_desc_get_size(bp); 4085 macb_dma_desc_get_size(bp);
4088 } 4086 }
4089 4087
4088 bp->rx_intr_mask = MACB_RX_INT_FLAGS;
4089 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
4090 bp->rx_intr_mask |= MACB_BIT(RXUBR);
4091
4090 mac = of_get_mac_address(np); 4092 mac = of_get_mac_address(np);
4091 if (mac) { 4093 if (mac) {
4092 ether_addr_copy(bp->dev->dev_addr, mac); 4094 ether_addr_copy(bp->dev->dev_addr, mac);
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 5f03199a3acf..05f4a3b21e29 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -54,7 +54,6 @@ config CAVIUM_PTP
54 tristate "Cavium PTP coprocessor as PTP clock" 54 tristate "Cavium PTP coprocessor as PTP clock"
55 depends on 64BIT && PCI 55 depends on 64BIT && PCI
56 imply PTP_1588_CLOCK 56 imply PTP_1588_CLOCK
57 default y
58 ---help--- 57 ---help---
59 This driver adds support for the Precision Time Protocol Clocks and 58 This driver adds support for the Precision Time Protocol Clocks and
60 Timestamping coprocessor (PTP) found on Cavium processors. 59 Timestamping coprocessor (PTP) found on Cavium processors.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..62636c1ed141 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
271}; 271};
272 272
273struct nicvf_work { 273struct nicvf_work {
274 struct delayed_work work; 274 struct work_struct work;
275 u8 mode; 275 u8 mode;
276 struct xcast_addr_list *mc; 276 struct xcast_addr_list *mc;
277}; 277};
@@ -327,7 +327,11 @@ struct nicvf {
327 struct nicvf_work rx_mode_work; 327 struct nicvf_work rx_mode_work;
328 /* spinlock to protect workqueue arguments from concurrent access */ 328 /* spinlock to protect workqueue arguments from concurrent access */
329 spinlock_t rx_mode_wq_lock; 329 spinlock_t rx_mode_wq_lock;
330 330 /* workqueue for handling kernel ndo_set_rx_mode() calls */
331 struct workqueue_struct *nicvf_rx_mode_wq;
332 /* mutex to protect VF's mailbox contents from concurrent access */
333 struct mutex rx_mode_mtx;
334 struct delayed_work link_change_work;
331 /* PTP timestamp */ 335 /* PTP timestamp */
332 struct cavium_ptp *ptp_clock; 336 struct cavium_ptp *ptp_clock;
333 /* Inbound timestamping is on */ 337 /* Inbound timestamping is on */
@@ -575,10 +579,8 @@ struct set_ptp {
575 579
576struct xcast { 580struct xcast {
577 u8 msg; 581 u8 msg;
578 union { 582 u8 mode;
579 u8 mode; 583 u64 mac:48;
580 u64 mac;
581 } data;
582}; 584};
583 585
584/* 128 bit shared memory between PF and each VF */ 586/* 128 bit shared memory between PF and each VF */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6c8dcb65ff03..c90252829ed3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -57,14 +57,8 @@ struct nicpf {
57#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) 57#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
58#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) 58#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
59 u8 *vf_lmac_map; 59 u8 *vf_lmac_map;
60 struct delayed_work dwork;
61 struct workqueue_struct *check_link;
62 u8 *link;
63 u8 *duplex;
64 u32 *speed;
65 u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; 60 u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
66 u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; 61 u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
67 bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
68 62
69 /* MSI-X */ 63 /* MSI-X */
70 u8 num_vec; 64 u8 num_vec;
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
929 nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val); 923 nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
930} 924}
931 925
926/* Get BGX LMAC link status and update corresponding VF
927 * if there is a change, valid only if internal L2 switch
928 * is not present otherwise VF link is always treated as up
929 */
930static void nic_link_status_get(struct nicpf *nic, u8 vf)
931{
932 union nic_mbx mbx = {};
933 struct bgx_link_status link;
934 u8 bgx, lmac;
935
936 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
937
938 /* Get BGX, LMAC indices for the VF */
939 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
940 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
941
942 /* Get interface link status */
943 bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
944
945 /* Send a mbox message to VF with current link status */
946 mbx.link_status.link_up = link.link_up;
947 mbx.link_status.duplex = link.duplex;
948 mbx.link_status.speed = link.speed;
949 mbx.link_status.mac_type = link.mac_type;
950
951 /* reply with link status */
952 nic_send_msg_to_vf(nic, vf, &mbx);
953}
954
932/* Interrupt handler to handle mailbox messages from VFs */ 955/* Interrupt handler to handle mailbox messages from VFs */
933static void nic_handle_mbx_intr(struct nicpf *nic, int vf) 956static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
934{ 957{
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
941 int i; 964 int i;
942 int ret = 0; 965 int ret = 0;
943 966
944 nic->mbx_lock[vf] = true;
945
946 mbx_addr = nic_get_mbx_addr(vf); 967 mbx_addr = nic_get_mbx_addr(vf);
947 mbx_data = (u64 *)&mbx; 968 mbx_data = (u64 *)&mbx;
948 969
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
957 switch (mbx.msg.msg) { 978 switch (mbx.msg.msg) {
958 case NIC_MBOX_MSG_READY: 979 case NIC_MBOX_MSG_READY:
959 nic_mbx_send_ready(nic, vf); 980 nic_mbx_send_ready(nic, vf);
960 if (vf < nic->num_vf_en) { 981 return;
961 nic->link[vf] = 0;
962 nic->duplex[vf] = 0;
963 nic->speed[vf] = 0;
964 }
965 goto unlock;
966 case NIC_MBOX_MSG_QS_CFG: 982 case NIC_MBOX_MSG_QS_CFG:
967 reg_addr = NIC_PF_QSET_0_127_CFG | 983 reg_addr = NIC_PF_QSET_0_127_CFG |
968 (mbx.qs.num << NIC_QS_ID_SHIFT); 984 (mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1031 break; 1047 break;
1032 case NIC_MBOX_MSG_RSS_SIZE: 1048 case NIC_MBOX_MSG_RSS_SIZE:
1033 nic_send_rss_size(nic, vf); 1049 nic_send_rss_size(nic, vf);
1034 goto unlock; 1050 return;
1035 case NIC_MBOX_MSG_RSS_CFG: 1051 case NIC_MBOX_MSG_RSS_CFG:
1036 case NIC_MBOX_MSG_RSS_CFG_CONT: 1052 case NIC_MBOX_MSG_RSS_CFG_CONT:
1037 nic_config_rss(nic, &mbx.rss_cfg); 1053 nic_config_rss(nic, &mbx.rss_cfg);
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1039 case NIC_MBOX_MSG_CFG_DONE: 1055 case NIC_MBOX_MSG_CFG_DONE:
1040 /* Last message of VF config msg sequence */ 1056 /* Last message of VF config msg sequence */
1041 nic_enable_vf(nic, vf, true); 1057 nic_enable_vf(nic, vf, true);
1042 goto unlock; 1058 break;
1043 case NIC_MBOX_MSG_SHUTDOWN: 1059 case NIC_MBOX_MSG_SHUTDOWN:
1044 /* First msg in VF teardown sequence */ 1060 /* First msg in VF teardown sequence */
1045 if (vf >= nic->num_vf_en) 1061 if (vf >= nic->num_vf_en)
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1049 break; 1065 break;
1050 case NIC_MBOX_MSG_ALLOC_SQS: 1066 case NIC_MBOX_MSG_ALLOC_SQS:
1051 nic_alloc_sqs(nic, &mbx.sqs_alloc); 1067 nic_alloc_sqs(nic, &mbx.sqs_alloc);
1052 goto unlock; 1068 return;
1053 case NIC_MBOX_MSG_NICVF_PTR: 1069 case NIC_MBOX_MSG_NICVF_PTR:
1054 nic->nicvf[vf] = mbx.nicvf.nicvf; 1070 nic->nicvf[vf] = mbx.nicvf.nicvf;
1055 break; 1071 break;
1056 case NIC_MBOX_MSG_PNICVF_PTR: 1072 case NIC_MBOX_MSG_PNICVF_PTR:
1057 nic_send_pnicvf(nic, vf); 1073 nic_send_pnicvf(nic, vf);
1058 goto unlock; 1074 return;
1059 case NIC_MBOX_MSG_SNICVF_PTR: 1075 case NIC_MBOX_MSG_SNICVF_PTR:
1060 nic_send_snicvf(nic, &mbx.nicvf); 1076 nic_send_snicvf(nic, &mbx.nicvf);
1061 goto unlock; 1077 return;
1062 case NIC_MBOX_MSG_BGX_STATS: 1078 case NIC_MBOX_MSG_BGX_STATS:
1063 nic_get_bgx_stats(nic, &mbx.bgx_stats); 1079 nic_get_bgx_stats(nic, &mbx.bgx_stats);
1064 goto unlock; 1080 return;
1065 case NIC_MBOX_MSG_LOOPBACK: 1081 case NIC_MBOX_MSG_LOOPBACK:
1066 ret = nic_config_loopback(nic, &mbx.lbk); 1082 ret = nic_config_loopback(nic, &mbx.lbk);
1067 break; 1083 break;
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1070 break; 1086 break;
1071 case NIC_MBOX_MSG_PFC: 1087 case NIC_MBOX_MSG_PFC:
1072 nic_pause_frame(nic, vf, &mbx.pfc); 1088 nic_pause_frame(nic, vf, &mbx.pfc);
1073 goto unlock; 1089 return;
1074 case NIC_MBOX_MSG_PTP_CFG: 1090 case NIC_MBOX_MSG_PTP_CFG:
1075 nic_config_timestamp(nic, vf, &mbx.ptp); 1091 nic_config_timestamp(nic, vf, &mbx.ptp);
1076 break; 1092 break;
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1094 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1110 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1095 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1111 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1096 bgx_set_dmac_cam_filter(nic->node, bgx, lmac, 1112 bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
1097 mbx.xcast.data.mac, 1113 mbx.xcast.mac,
1098 vf < NIC_VF_PER_MBX_REG ? vf : 1114 vf < NIC_VF_PER_MBX_REG ? vf :
1099 vf - NIC_VF_PER_MBX_REG); 1115 vf - NIC_VF_PER_MBX_REG);
1100 break; 1116 break;
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1106 } 1122 }
1107 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1123 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1108 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1124 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1109 bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode); 1125 bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
1110 break; 1126 break;
1127 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
1128 if (vf >= nic->num_vf_en) {
1129 ret = -1; /* NACK */
1130 break;
1131 }
1132 nic_link_status_get(nic, vf);
1133 return;
1111 default: 1134 default:
1112 dev_err(&nic->pdev->dev, 1135 dev_err(&nic->pdev->dev,
1113 "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); 1136 "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1121 mbx.msg.msg, vf); 1144 mbx.msg.msg, vf);
1122 nic_mbx_send_nack(nic, vf); 1145 nic_mbx_send_nack(nic, vf);
1123 } 1146 }
1124unlock:
1125 nic->mbx_lock[vf] = false;
1126} 1147}
1127 1148
1128static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) 1149static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
1270 return 0; 1291 return 0;
1271} 1292}
1272 1293
1273/* Poll for BGX LMAC link status and update corresponding VF
1274 * if there is a change, valid only if internal L2 switch
1275 * is not present otherwise VF link is always treated as up
1276 */
1277static void nic_poll_for_link(struct work_struct *work)
1278{
1279 union nic_mbx mbx = {};
1280 struct nicpf *nic;
1281 struct bgx_link_status link;
1282 u8 vf, bgx, lmac;
1283
1284 nic = container_of(work, struct nicpf, dwork.work);
1285
1286 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
1287
1288 for (vf = 0; vf < nic->num_vf_en; vf++) {
1289 /* Poll only if VF is UP */
1290 if (!nic->vf_enabled[vf])
1291 continue;
1292
1293 /* Get BGX, LMAC indices for the VF */
1294 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1295 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1296 /* Get interface link status */
1297 bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
1298
1299 /* Inform VF only if link status changed */
1300 if (nic->link[vf] == link.link_up)
1301 continue;
1302
1303 if (!nic->mbx_lock[vf]) {
1304 nic->link[vf] = link.link_up;
1305 nic->duplex[vf] = link.duplex;
1306 nic->speed[vf] = link.speed;
1307
1308 /* Send a mbox message to VF with current link status */
1309 mbx.link_status.link_up = link.link_up;
1310 mbx.link_status.duplex = link.duplex;
1311 mbx.link_status.speed = link.speed;
1312 mbx.link_status.mac_type = link.mac_type;
1313 nic_send_msg_to_vf(nic, vf, &mbx);
1314 }
1315 }
1316 queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
1317}
1318
1319static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1294static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1320{ 1295{
1321 struct device *dev = &pdev->dev; 1296 struct device *dev = &pdev->dev;
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1384 if (!nic->vf_lmac_map) 1359 if (!nic->vf_lmac_map)
1385 goto err_release_regions; 1360 goto err_release_regions;
1386 1361
1387 nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
1388 if (!nic->link)
1389 goto err_release_regions;
1390
1391 nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
1392 if (!nic->duplex)
1393 goto err_release_regions;
1394
1395 nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
1396 if (!nic->speed)
1397 goto err_release_regions;
1398
1399 /* Initialize hardware */ 1362 /* Initialize hardware */
1400 nic_init_hw(nic); 1363 nic_init_hw(nic);
1401 1364
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1411 if (err) 1374 if (err)
1412 goto err_unregister_interrupts; 1375 goto err_unregister_interrupts;
1413 1376
1414 /* Register a physical link status poll fn() */
1415 nic->check_link = alloc_workqueue("check_link_status",
1416 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1417 if (!nic->check_link) {
1418 err = -ENOMEM;
1419 goto err_disable_sriov;
1420 }
1421
1422 INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
1423 queue_delayed_work(nic->check_link, &nic->dwork, 0);
1424
1425 return 0; 1377 return 0;
1426 1378
1427err_disable_sriov:
1428 if (nic->flags & NIC_SRIOV_ENABLED)
1429 pci_disable_sriov(pdev);
1430err_unregister_interrupts: 1379err_unregister_interrupts:
1431 nic_unregister_interrupts(nic); 1380 nic_unregister_interrupts(nic);
1432err_release_regions: 1381err_release_regions:
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev)
1447 if (nic->flags & NIC_SRIOV_ENABLED) 1396 if (nic->flags & NIC_SRIOV_ENABLED)
1448 pci_disable_sriov(pdev); 1397 pci_disable_sriov(pdev);
1449 1398
1450 if (nic->check_link) {
1451 /* Destroy work Queue */
1452 cancel_delayed_work_sync(&nic->dwork);
1453 destroy_workqueue(nic->check_link);
1454 }
1455
1456 nic_unregister_interrupts(nic); 1399 nic_unregister_interrupts(nic);
1457 pci_release_regions(pdev); 1400 pci_release_regions(pdev);
1458 1401
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..503cfadff4ac 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
68MODULE_PARM_DESC(cpi_alg, 68MODULE_PARM_DESC(cpi_alg,
69 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); 69 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
70 70
71/* workqueue for handling kernel ndo_set_rx_mode() calls */
72static struct workqueue_struct *nicvf_rx_mode_wq;
73
74static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) 71static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
75{ 72{
76 if (nic->sqs_mode) 73 if (nic->sqs_mode)
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
127{ 124{
128 int timeout = NIC_MBOX_MSG_TIMEOUT; 125 int timeout = NIC_MBOX_MSG_TIMEOUT;
129 int sleep = 10; 126 int sleep = 10;
127 int ret = 0;
128
129 mutex_lock(&nic->rx_mode_mtx);
130 130
131 nic->pf_acked = false; 131 nic->pf_acked = false;
132 nic->pf_nacked = false; 132 nic->pf_nacked = false;
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
139 netdev_err(nic->netdev, 139 netdev_err(nic->netdev,
140 "PF NACK to mbox msg 0x%02x from VF%d\n", 140 "PF NACK to mbox msg 0x%02x from VF%d\n",
141 (mbx->msg.msg & 0xFF), nic->vf_id); 141 (mbx->msg.msg & 0xFF), nic->vf_id);
142 return -EINVAL; 142 ret = -EINVAL;
143 break;
143 } 144 }
144 msleep(sleep); 145 msleep(sleep);
145 if (nic->pf_acked) 146 if (nic->pf_acked)
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
149 netdev_err(nic->netdev, 150 netdev_err(nic->netdev,
150 "PF didn't ACK to mbox msg 0x%02x from VF%d\n", 151 "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
151 (mbx->msg.msg & 0xFF), nic->vf_id); 152 (mbx->msg.msg & 0xFF), nic->vf_id);
152 return -EBUSY; 153 ret = -EBUSY;
154 break;
153 } 155 }
154 } 156 }
155 return 0; 157 mutex_unlock(&nic->rx_mode_mtx);
158 return ret;
156} 159}
157 160
158/* Checks if VF is able to comminicate with PF 161/* Checks if VF is able to comminicate with PF
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
172 return 1; 175 return 1;
173} 176}
174 177
178static void nicvf_send_cfg_done(struct nicvf *nic)
179{
180 union nic_mbx mbx = {};
181
182 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
183 if (nicvf_send_msg_to_pf(nic, &mbx)) {
184 netdev_err(nic->netdev,
185 "PF didn't respond to CFG DONE msg\n");
186 }
187}
188
175static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) 189static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
176{ 190{
177 if (bgx->rx) 191 if (bgx->rx)
@@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
228 break; 242 break;
229 case NIC_MBOX_MSG_BGX_LINK_CHANGE: 243 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
230 nic->pf_acked = true; 244 nic->pf_acked = true;
231 nic->link_up = mbx.link_status.link_up; 245 if (nic->link_up != mbx.link_status.link_up) {
232 nic->duplex = mbx.link_status.duplex; 246 nic->link_up = mbx.link_status.link_up;
233 nic->speed = mbx.link_status.speed; 247 nic->duplex = mbx.link_status.duplex;
234 nic->mac_type = mbx.link_status.mac_type; 248 nic->speed = mbx.link_status.speed;
235 if (nic->link_up) { 249 nic->mac_type = mbx.link_status.mac_type;
236 netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n", 250 if (nic->link_up) {
237 nic->speed, 251 netdev_info(nic->netdev,
238 nic->duplex == DUPLEX_FULL ? 252 "Link is Up %d Mbps %s duplex\n",
239 "Full" : "Half"); 253 nic->speed,
240 netif_carrier_on(nic->netdev); 254 nic->duplex == DUPLEX_FULL ?
241 netif_tx_start_all_queues(nic->netdev); 255 "Full" : "Half");
242 } else { 256 netif_carrier_on(nic->netdev);
243 netdev_info(nic->netdev, "Link is Down\n"); 257 netif_tx_start_all_queues(nic->netdev);
244 netif_carrier_off(nic->netdev); 258 } else {
245 netif_tx_stop_all_queues(nic->netdev); 259 netdev_info(nic->netdev, "Link is Down\n");
260 netif_carrier_off(nic->netdev);
261 netif_tx_stop_all_queues(nic->netdev);
262 }
246 } 263 }
247 break; 264 break;
248 case NIC_MBOX_MSG_ALLOC_SQS: 265 case NIC_MBOX_MSG_ALLOC_SQS:
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
1311 struct nicvf_cq_poll *cq_poll = NULL; 1328 struct nicvf_cq_poll *cq_poll = NULL;
1312 union nic_mbx mbx = {}; 1329 union nic_mbx mbx = {};
1313 1330
1331 cancel_delayed_work_sync(&nic->link_change_work);
1332
1333 /* wait till all queued set_rx_mode tasks completes */
1334 drain_workqueue(nic->nicvf_rx_mode_wq);
1335
1314 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; 1336 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1315 nicvf_send_msg_to_pf(nic, &mbx); 1337 nicvf_send_msg_to_pf(nic, &mbx);
1316 1338
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1410 return nicvf_send_msg_to_pf(nic, &mbx); 1432 return nicvf_send_msg_to_pf(nic, &mbx);
1411} 1433}
1412 1434
1435static void nicvf_link_status_check_task(struct work_struct *work_arg)
1436{
1437 struct nicvf *nic = container_of(work_arg,
1438 struct nicvf,
1439 link_change_work.work);
1440 union nic_mbx mbx = {};
1441 mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
1442 nicvf_send_msg_to_pf(nic, &mbx);
1443 queue_delayed_work(nic->nicvf_rx_mode_wq,
1444 &nic->link_change_work, 2 * HZ);
1445}
1446
1413int nicvf_open(struct net_device *netdev) 1447int nicvf_open(struct net_device *netdev)
1414{ 1448{
1415 int cpu, err, qidx; 1449 int cpu, err, qidx;
1416 struct nicvf *nic = netdev_priv(netdev); 1450 struct nicvf *nic = netdev_priv(netdev);
1417 struct queue_set *qs = nic->qs; 1451 struct queue_set *qs = nic->qs;
1418 struct nicvf_cq_poll *cq_poll = NULL; 1452 struct nicvf_cq_poll *cq_poll = NULL;
1419 union nic_mbx mbx = {}; 1453
1454 /* wait till all queued set_rx_mode tasks completes if any */
1455 drain_workqueue(nic->nicvf_rx_mode_wq);
1420 1456
1421 netif_carrier_off(netdev); 1457 netif_carrier_off(netdev);
1422 1458
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev)
1512 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1548 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
1513 1549
1514 /* Send VF config done msg to PF */ 1550 /* Send VF config done msg to PF */
1515 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; 1551 nicvf_send_cfg_done(nic);
1516 nicvf_write_to_mbx(nic, &mbx); 1552
1553 INIT_DELAYED_WORK(&nic->link_change_work,
1554 nicvf_link_status_check_task);
1555 queue_delayed_work(nic->nicvf_rx_mode_wq,
1556 &nic->link_change_work, 0);
1517 1557
1518 return 0; 1558 return 0;
1519cleanup: 1559cleanup:
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
1941 1981
1942 /* flush DMAC filters and reset RX mode */ 1982 /* flush DMAC filters and reset RX mode */
1943 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; 1983 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
1944 nicvf_send_msg_to_pf(nic, &mbx); 1984 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
1985 goto free_mc;
1945 1986
1946 if (mode & BGX_XCAST_MCAST_FILTER) { 1987 if (mode & BGX_XCAST_MCAST_FILTER) {
1947 /* once enabling filtering, we need to signal to PF to add 1988 /* once enabling filtering, we need to signal to PF to add
1948 * its' own LMAC to the filter to accept packets for it. 1989 * its' own LMAC to the filter to accept packets for it.
1949 */ 1990 */
1950 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; 1991 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1951 mbx.xcast.data.mac = 0; 1992 mbx.xcast.mac = 0;
1952 nicvf_send_msg_to_pf(nic, &mbx); 1993 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
1994 goto free_mc;
1953 } 1995 }
1954 1996
1955 /* check if we have any specific MACs to be added to PF DMAC filter */ 1997 /* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
1957 /* now go through kernel list of MACs and add them one by one */ 1999 /* now go through kernel list of MACs and add them one by one */
1958 for (idx = 0; idx < mc_addrs->count; idx++) { 2000 for (idx = 0; idx < mc_addrs->count; idx++) {
1959 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; 2001 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1960 mbx.xcast.data.mac = mc_addrs->mc[idx]; 2002 mbx.xcast.mac = mc_addrs->mc[idx];
1961 nicvf_send_msg_to_pf(nic, &mbx); 2003 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
2004 goto free_mc;
1962 } 2005 }
1963 kfree(mc_addrs);
1964 } 2006 }
1965 2007
1966 /* and finally set rx mode for PF accordingly */ 2008 /* and finally set rx mode for PF accordingly */
1967 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; 2009 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
1968 mbx.xcast.data.mode = mode; 2010 mbx.xcast.mode = mode;
1969 2011
1970 nicvf_send_msg_to_pf(nic, &mbx); 2012 nicvf_send_msg_to_pf(nic, &mbx);
2013free_mc:
2014 kfree(mc_addrs);
1971} 2015}
1972 2016
1973static void nicvf_set_rx_mode_task(struct work_struct *work_arg) 2017static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
1974{ 2018{
1975 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, 2019 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
1976 work.work); 2020 work);
1977 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); 2021 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
1978 u8 mode; 2022 u8 mode;
1979 struct xcast_addr_list *mc; 2023 struct xcast_addr_list *mc;
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
2030 kfree(nic->rx_mode_work.mc); 2074 kfree(nic->rx_mode_work.mc);
2031 nic->rx_mode_work.mc = mc_list; 2075 nic->rx_mode_work.mc = mc_list;
2032 nic->rx_mode_work.mode = mode; 2076 nic->rx_mode_work.mode = mode;
2033 queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); 2077 queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
2034 spin_unlock(&nic->rx_mode_wq_lock); 2078 spin_unlock(&nic->rx_mode_wq_lock);
2035} 2079}
2036 2080
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2187 2231
2188 INIT_WORK(&nic->reset_task, nicvf_reset_task); 2232 INIT_WORK(&nic->reset_task, nicvf_reset_task);
2189 2233
2190 INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); 2234 nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
2235 WQ_MEM_RECLAIM,
2236 nic->vf_id);
2237 INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
2191 spin_lock_init(&nic->rx_mode_wq_lock); 2238 spin_lock_init(&nic->rx_mode_wq_lock);
2239 mutex_init(&nic->rx_mode_mtx);
2192 2240
2193 err = register_netdev(netdev); 2241 err = register_netdev(netdev);
2194 if (err) { 2242 if (err) {
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev)
2228 nic = netdev_priv(netdev); 2276 nic = netdev_priv(netdev);
2229 pnetdev = nic->pnicvf->netdev; 2277 pnetdev = nic->pnicvf->netdev;
2230 2278
2231 cancel_delayed_work_sync(&nic->rx_mode_work.work);
2232
2233 /* Check if this Qset is assigned to different VF. 2279 /* Check if this Qset is assigned to different VF.
2234 * If yes, clean primary and all secondary Qsets. 2280 * If yes, clean primary and all secondary Qsets.
2235 */ 2281 */
2236 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) 2282 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
2237 unregister_netdev(pnetdev); 2283 unregister_netdev(pnetdev);
2284 if (nic->nicvf_rx_mode_wq) {
2285 destroy_workqueue(nic->nicvf_rx_mode_wq);
2286 nic->nicvf_rx_mode_wq = NULL;
2287 }
2238 nicvf_unregister_interrupts(nic); 2288 nicvf_unregister_interrupts(nic);
2239 pci_set_drvdata(pdev, NULL); 2289 pci_set_drvdata(pdev, NULL);
2240 if (nic->drv_stats) 2290 if (nic->drv_stats)
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = {
2261static int __init nicvf_init_module(void) 2311static int __init nicvf_init_module(void)
2262{ 2312{
2263 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); 2313 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
2264 nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
2265 WQ_MEM_RECLAIM);
2266 return pci_register_driver(&nicvf_driver); 2314 return pci_register_driver(&nicvf_driver);
2267} 2315}
2268 2316
2269static void __exit nicvf_cleanup_module(void) 2317static void __exit nicvf_cleanup_module(void)
2270{ 2318{
2271 if (nicvf_rx_mode_wq) {
2272 destroy_workqueue(nicvf_rx_mode_wq);
2273 nicvf_rx_mode_wq = NULL;
2274 }
2275 pci_unregister_driver(&nicvf_driver); 2319 pci_unregister_driver(&nicvf_driver);
2276} 2320}
2277 2321
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index fcaf18fa3904..5b4d3badcb73 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
59 dmem->q_len = q_len; 59 dmem->q_len = q_len;
60 dmem->size = (desc_size * q_len) + align_bytes; 60 dmem->size = (desc_size * q_len) + align_bytes;
61 /* Save address, need it while freeing */ 61 /* Save address, need it while freeing */
62 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, 62 dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
63 &dmem->dma, GFP_KERNEL); 63 &dmem->dma, GFP_KERNEL);
64 if (!dmem->unalign_base) 64 if (!dmem->unalign_base)
65 return -ENOMEM; 65 return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index e337da6ba2a4..673c57b8023f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
1217 1217
1218 /* Disable MAC steering (NCSI traffic) */ 1218 /* Disable MAC steering (NCSI traffic) */
1219 for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) 1219 for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
1220 bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); 1220 bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
1221} 1221}
1222 1222
1223static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) 1223static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cbdd20b9ee6f..5cbc54e9eb19 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -60,7 +60,7 @@
60#define RX_DMACX_CAM_EN BIT_ULL(48) 60#define RX_DMACX_CAM_EN BIT_ULL(48)
61#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49) 61#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49)
62#define RX_DMAC_COUNT 32 62#define RX_DMAC_COUNT 32
63#define BGX_CMR_RX_STREERING 0x300 63#define BGX_CMR_RX_STEERING 0x300
64#define RX_TRAFFIC_STEER_RULE_COUNT 8 64#define RX_TRAFFIC_STEER_RULE_COUNT 8
65#define BGX_CMR_CHAN_MSK_AND 0x450 65#define BGX_CMR_CHAN_MSK_AND 0x450
66#define BGX_CMR_BIST_STATUS 0x460 66#define BGX_CMR_BIST_STATUS 0x460
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 20b6e1b3f5e3..89db739b7819 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
620{ 620{
621 size_t len = nelem * elem_size; 621 size_t len = nelem * elem_size;
622 void *s = NULL; 622 void *s = NULL;
623 void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); 623 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
624 624
625 if (!p) 625 if (!p)
626 return NULL; 626 return NULL;
@@ -2381,7 +2381,7 @@ no_mem:
2381 lro_add_page(adap, qs, fl, 2381 lro_add_page(adap, qs, fl,
2382 G_RSPD_LEN(len), 2382 G_RSPD_LEN(len),
2383 flags & F_RSPD_EOP); 2383 flags & F_RSPD_EOP);
2384 goto next_fl; 2384 goto next_fl;
2385 } 2385 }
2386 2386
2387 skb = get_packet_pg(adap, fl, q, 2387 skb = get_packet_pg(adap, fl, q,
@@ -3214,11 +3214,13 @@ void t3_start_sge_timers(struct adapter *adap)
3214 for (i = 0; i < SGE_QSETS; ++i) { 3214 for (i = 0; i < SGE_QSETS; ++i) {
3215 struct sge_qset *q = &adap->sge.qs[i]; 3215 struct sge_qset *q = &adap->sge.qs[i];
3216 3216
3217 if (q->tx_reclaim_timer.function) 3217 if (q->tx_reclaim_timer.function)
3218 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 3218 mod_timer(&q->tx_reclaim_timer,
3219 jiffies + TX_RECLAIM_PERIOD);
3219 3220
3220 if (q->rx_reclaim_timer.function) 3221 if (q->rx_reclaim_timer.function)
3221 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); 3222 mod_timer(&q->rx_reclaim_timer,
3223 jiffies + RX_RECLAIM_PERIOD);
3222 } 3224 }
3223} 3225}
3224 3226
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 080918af773c..0a9f2c596624 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1082,7 +1082,7 @@ int t3_check_fw_version(struct adapter *adapter)
1082 CH_WARN(adapter, "found newer FW version(%u.%u), " 1082 CH_WARN(adapter, "found newer FW version(%u.%u), "
1083 "driver compiled for version %u.%u\n", major, minor, 1083 "driver compiled for version %u.%u\n", major, minor,
1084 FW_VERSION_MAJOR, FW_VERSION_MINOR); 1084 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1085 return 0; 1085 return 0;
1086 } 1086 }
1087 return -EINVAL; 1087 return -EINVAL;
1088} 1088}
@@ -3619,7 +3619,7 @@ int t3_reset_adapter(struct adapter *adapter)
3619 3619
3620static int init_parity(struct adapter *adap) 3620static int init_parity(struct adapter *adap)
3621{ 3621{
3622 int i, err, addr; 3622 int i, err, addr;
3623 3623
3624 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 3624 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3625 return -EBUSY; 3625 return -EBUSY;
@@ -3806,6 +3806,6 @@ int t3_replay_prep_adapter(struct adapter *adapter)
3806 p->phy.ops->power_down(&p->phy, 1); 3806 p->phy.ops->power_down(&p->phy, 1);
3807 } 3807 }
3808 3808
3809return 0; 3809 return 0;
3810} 3810}
3811 3811
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 9f9d6cae39d5..58a039c3224a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -378,10 +378,10 @@ static void cxgb4_init_ptp_timer(struct adapter *adapter)
378 int err; 378 int err;
379 379
380 memset(&c, 0, sizeof(c)); 380 memset(&c, 0, sizeof(c));
381 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | 381 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
382 FW_CMD_REQUEST_F | 382 FW_CMD_REQUEST_F |
383 FW_CMD_WRITE_F | 383 FW_CMD_WRITE_F |
384 FW_PTP_CMD_PORTID_V(0)); 384 FW_PTP_CMD_PORTID_V(0));
385 c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); 385 c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
386 c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; 386 c.u.scmd.sc = FW_PTP_SC_INIT_TIMER;
387 387
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 9a6065a3fa46..b3654598a2d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -78,7 +78,7 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
78 unsigned long flags; 78 unsigned long flags;
79 79
80 spin_lock_irqsave(&bmap->lock, flags); 80 spin_lock_irqsave(&bmap->lock, flags);
81 __clear_bit(msix_idx, bmap->msix_bmap); 81 __clear_bit(msix_idx, bmap->msix_bmap);
82 spin_unlock_irqrestore(&bmap->lock, flags); 82 spin_unlock_irqrestore(&bmap->lock, flags);
83} 83}
84 84
@@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
660 lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; 660 lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
661 lld->udb_density = 1 << adap->params.sge.eq_qpp; 661 lld->udb_density = 1 << adap->params.sge.eq_qpp;
662 lld->ucq_density = 1 << adap->params.sge.iq_qpp; 662 lld->ucq_density = 1 << adap->params.sge.iq_qpp;
663 lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
663 lld->filt_mode = adap->params.tp.vlan_pri_map; 664 lld->filt_mode = adap->params.tp.vlan_pri_map;
664 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 665 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
665 for (i = 0; i < NCHAN; i++) 666 for (i = 0; i < NCHAN; i++)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 5fa9a2d5fc4b..21da34a4ca24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -336,6 +336,7 @@ struct cxgb4_lld_info {
336 unsigned int cclk_ps; /* Core clock period in psec */ 336 unsigned int cclk_ps; /* Core clock period in psec */
337 unsigned short udb_density; /* # of user DB/page */ 337 unsigned short udb_density; /* # of user DB/page */
338 unsigned short ucq_density; /* # of user CQs/page */ 338 unsigned short ucq_density; /* # of user CQs/page */
339 unsigned int sge_host_page_size; /* SGE host page size */
339 unsigned short filt_mode; /* filter optional components */ 340 unsigned short filt_mode; /* filter optional components */
340 unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */ 341 unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
341 /* scheduler queue */ 342 /* scheduler queue */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b90188401d4a..fc0bc6458e84 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
694{ 694{
695 size_t len = nelem * elem_size + stat_size; 695 size_t len = nelem * elem_size + stat_size;
696 void *s = NULL; 696 void *s = NULL;
697 void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); 697 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
698 698
699 if (!p) 699 if (!p)
700 return NULL; 700 return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8c34292a0ec..2b03f6187a24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3794,7 +3794,7 @@ int t4_load_phy_fw(struct adapter *adap,
3794 /* If we have version number support, then check to see if the adapter 3794 /* If we have version number support, then check to see if the adapter
3795 * already has up-to-date PHY firmware loaded. 3795 * already has up-to-date PHY firmware loaded.
3796 */ 3796 */
3797 if (phy_fw_version) { 3797 if (phy_fw_version) {
3798 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); 3798 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3799 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); 3799 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3800 if (ret < 0) 3800 if (ret < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 3007e1ac1e61..1d534f0baa69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
756 * Allocate the hardware ring and PCI DMA bus address space for said. 756 * Allocate the hardware ring and PCI DMA bus address space for said.
757 */ 757 */
758 size_t hwlen = nelem * hwsize + stat_size; 758 size_t hwlen = nelem * hwsize + stat_size;
759 void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); 759 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
760 760
761 if (!hwring) 761 if (!hwring)
762 return NULL; 762 return NULL;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 60641e202534..9a7f70db20c7 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1434 * csum is correct or is zero. 1434 * csum is correct or is zero.
1435 */ 1435 */
1436 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && 1436 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
1437 tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { 1437 tcp_udp_csum_ok && outer_csum_ok &&
1438 (ipv4_csum_ok || ipv6)) {
1438 skb->ip_summed = CHECKSUM_UNNECESSARY; 1439 skb->ip_summed = CHECKSUM_UNNECESSARY;
1439 skb->csum_level = encap; 1440 skb->csum_level = encap;
1440 } 1441 }
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 13430f75496c..f1a2da15dd0a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -585,7 +585,7 @@ static void de_tx (struct de_private *de)
585 netif_dbg(de, tx_done, de->dev, 585 netif_dbg(de, tx_done, de->dev,
586 "tx done, slot %d\n", tx_tail); 586 "tx done, slot %d\n", tx_tail);
587 } 587 }
588 dev_kfree_skb_irq(skb); 588 dev_consume_skb_irq(skb);
589 } 589 }
590 590
591next: 591next:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1e9d882c04ef..59a7f0b99069 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1808,9 +1808,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
1808 total_size = buf_len; 1808 total_size = buf_len;
1809 1809
1810 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1810 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1811 get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1811 get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
1812 get_fat_cmd.size, 1812 get_fat_cmd.size,
1813 &get_fat_cmd.dma, GFP_ATOMIC); 1813 &get_fat_cmd.dma, GFP_ATOMIC);
1814 if (!get_fat_cmd.va) 1814 if (!get_fat_cmd.va)
1815 return -ENOMEM; 1815 return -ENOMEM;
1816 1816
@@ -2302,8 +2302,8 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2302 return -EINVAL; 2302 return -EINVAL;
2303 2303
2304 cmd.size = sizeof(struct be_cmd_resp_port_type); 2304 cmd.size = sizeof(struct be_cmd_resp_port_type);
2305 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2305 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2306 GFP_ATOMIC); 2306 GFP_ATOMIC);
2307 if (!cmd.va) { 2307 if (!cmd.va) {
2308 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2308 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2309 return -ENOMEM; 2309 return -ENOMEM;
@@ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter,
3066 3066
3067 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 3067 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3068 + LANCER_FW_DOWNLOAD_CHUNK; 3068 + LANCER_FW_DOWNLOAD_CHUNK;
3069 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, 3069 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3070 &flash_cmd.dma, GFP_KERNEL); 3070 GFP_KERNEL);
3071 if (!flash_cmd.va) 3071 if (!flash_cmd.va)
3072 return -ENOMEM; 3072 return -ENOMEM;
3073 3073
@@ -3184,8 +3184,8 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3184 } 3184 }
3185 3185
3186 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 3186 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3187 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3187 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3188 GFP_KERNEL); 3188 GFP_KERNEL);
3189 if (!flash_cmd.va) 3189 if (!flash_cmd.va)
3190 return -ENOMEM; 3190 return -ENOMEM;
3191 3191
@@ -3435,8 +3435,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
3435 goto err; 3435 goto err;
3436 } 3436 }
3437 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 3437 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
3438 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3438 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3439 GFP_ATOMIC); 3439 GFP_ATOMIC);
3440 if (!cmd.va) { 3440 if (!cmd.va) {
3441 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3441 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3442 status = -ENOMEM; 3442 status = -ENOMEM;
@@ -3522,9 +3522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
3522 3522
3523 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 3523 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
3524 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 3524 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
3525 attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3525 attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3526 attribs_cmd.size, 3526 attribs_cmd.size,
3527 &attribs_cmd.dma, GFP_ATOMIC); 3527 &attribs_cmd.dma, GFP_ATOMIC);
3528 if (!attribs_cmd.va) { 3528 if (!attribs_cmd.va) {
3529 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3529 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3530 status = -ENOMEM; 3530 status = -ENOMEM;
@@ -3699,10 +3699,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
3699 3699
3700 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 3700 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
3701 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 3701 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
3702 get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3702 get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3703 get_mac_list_cmd.size, 3703 get_mac_list_cmd.size,
3704 &get_mac_list_cmd.dma, 3704 &get_mac_list_cmd.dma,
3705 GFP_ATOMIC); 3705 GFP_ATOMIC);
3706 3706
3707 if (!get_mac_list_cmd.va) { 3707 if (!get_mac_list_cmd.va) {
3708 dev_err(&adapter->pdev->dev, 3708 dev_err(&adapter->pdev->dev,
@@ -3829,8 +3829,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3829 3829
3830 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3830 memset(&cmd, 0, sizeof(struct be_dma_mem));
3831 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3831 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3832 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3832 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3833 GFP_KERNEL); 3833 GFP_KERNEL);
3834 if (!cmd.va) 3834 if (!cmd.va)
3835 return -ENOMEM; 3835 return -ENOMEM;
3836 3836
@@ -4035,8 +4035,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
4035 4035
4036 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4036 memset(&cmd, 0, sizeof(struct be_dma_mem));
4037 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 4037 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
4038 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4038 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4039 GFP_ATOMIC); 4039 GFP_ATOMIC);
4040 if (!cmd.va) { 4040 if (!cmd.va) {
4041 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 4041 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
4042 status = -ENOMEM; 4042 status = -ENOMEM;
@@ -4089,9 +4089,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
4089 4089
4090 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 4090 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4091 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4091 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4092 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 4092 extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
4093 extfat_cmd.size, &extfat_cmd.dma, 4093 extfat_cmd.size, &extfat_cmd.dma,
4094 GFP_ATOMIC); 4094 GFP_ATOMIC);
4095 if (!extfat_cmd.va) 4095 if (!extfat_cmd.va)
4096 return -ENOMEM; 4096 return -ENOMEM;
4097 4097
@@ -4127,9 +4127,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
4127 4127
4128 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 4128 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4129 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4129 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4130 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 4130 extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
4131 extfat_cmd.size, &extfat_cmd.dma, 4131 extfat_cmd.size, &extfat_cmd.dma,
4132 GFP_ATOMIC); 4132 GFP_ATOMIC);
4133 4133
4134 if (!extfat_cmd.va) { 4134 if (!extfat_cmd.va) {
4135 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 4135 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -4354,8 +4354,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
4354 4354
4355 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4355 memset(&cmd, 0, sizeof(struct be_dma_mem));
4356 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 4356 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
4357 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4357 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4358 GFP_ATOMIC); 4358 GFP_ATOMIC);
4359 if (!cmd.va) { 4359 if (!cmd.va) {
4360 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 4360 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
4361 status = -ENOMEM; 4361 status = -ENOMEM;
@@ -4452,8 +4452,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
4452 4452
4453 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4453 memset(&cmd, 0, sizeof(struct be_dma_mem));
4454 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 4454 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
4455 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4455 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4456 GFP_ATOMIC); 4456 GFP_ATOMIC);
4457 if (!cmd.va) 4457 if (!cmd.va)
4458 return -ENOMEM; 4458 return -ENOMEM;
4459 4459
@@ -4539,8 +4539,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
4539 4539
4540 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4540 memset(&cmd, 0, sizeof(struct be_dma_mem));
4541 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 4541 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
4542 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4542 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4543 GFP_ATOMIC); 4543 GFP_ATOMIC);
4544 if (!cmd.va) 4544 if (!cmd.va)
4545 return -ENOMEM; 4545 return -ENOMEM;
4546 4546
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 3f6749fc889f..4c218341c51b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -274,8 +274,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
274 int status = 0; 274 int status = 0;
275 275
276 read_cmd.size = LANCER_READ_FILE_CHUNK; 276 read_cmd.size = LANCER_READ_FILE_CHUNK;
277 read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, 277 read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
278 &read_cmd.dma, GFP_ATOMIC); 278 &read_cmd.dma, GFP_ATOMIC);
279 279
280 if (!read_cmd.va) { 280 if (!read_cmd.va) {
281 dev_err(&adapter->pdev->dev, 281 dev_err(&adapter->pdev->dev,
@@ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
815 } 815 }
816 816
817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
818 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); 818 cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
819 if (!cmd.va) 819 if (!cmd.va)
820 return -ENOMEM; 820 return -ENOMEM;
821 821
@@ -851,9 +851,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
851 }; 851 };
852 852
853 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 853 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
854 ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 854 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
855 ddrdma_cmd.size, &ddrdma_cmd.dma, 855 ddrdma_cmd.size, &ddrdma_cmd.dma,
856 GFP_KERNEL); 856 GFP_KERNEL);
857 if (!ddrdma_cmd.va) 857 if (!ddrdma_cmd.va)
858 return -ENOMEM; 858 return -ENOMEM;
859 859
@@ -1014,9 +1014,9 @@ static int be_read_eeprom(struct net_device *netdev,
1014 1014
1015 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); 1015 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
1016 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); 1016 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
1017 eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1017 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
1018 eeprom_cmd.size, &eeprom_cmd.dma, 1018 eeprom_cmd.size, &eeprom_cmd.dma,
1019 GFP_KERNEL); 1019 GFP_KERNEL);
1020 1020
1021 if (!eeprom_cmd.va) 1021 if (!eeprom_cmd.va)
1022 return -ENOMEM; 1022 return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 852f5bfe5f6d..d5026909dec5 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
167 q->len = len; 167 q->len = len;
168 q->entry_size = entry_size; 168 q->entry_size = entry_size;
169 mem->size = len * entry_size; 169 mem->size = len * entry_size;
170 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, 170 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
171 GFP_KERNEL); 171 &mem->dma, GFP_KERNEL);
172 if (!mem->va) 172 if (!mem->va)
173 return -ENOMEM; 173 return -ENOMEM;
174 return 0; 174 return 0;
@@ -5766,9 +5766,9 @@ static int be_drv_init(struct be_adapter *adapter)
5766 int status = 0; 5766 int status = 0;
5767 5767
5768 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 5768 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5769 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, 5769 mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
5770 &mbox_mem_alloc->dma, 5770 &mbox_mem_alloc->dma,
5771 GFP_KERNEL); 5771 GFP_KERNEL);
5772 if (!mbox_mem_alloc->va) 5772 if (!mbox_mem_alloc->va)
5773 return -ENOMEM; 5773 return -ENOMEM;
5774 5774
@@ -5777,8 +5777,8 @@ static int be_drv_init(struct be_adapter *adapter)
5777 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 5777 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5778 5778
5779 rx_filter->size = sizeof(struct be_cmd_req_rx_filter); 5779 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5780 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, 5780 rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
5781 &rx_filter->dma, GFP_KERNEL); 5781 &rx_filter->dma, GFP_KERNEL);
5782 if (!rx_filter->va) { 5782 if (!rx_filter->va) {
5783 status = -ENOMEM; 5783 status = -ENOMEM;
5784 goto free_mbox; 5784 goto free_mbox;
@@ -5792,8 +5792,8 @@ static int be_drv_init(struct be_adapter *adapter)
5792 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); 5792 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5793 else 5793 else
5794 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); 5794 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5795 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, 5795 stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
5796 &stats_cmd->dma, GFP_KERNEL); 5796 &stats_cmd->dma, GFP_KERNEL);
5797 if (!stats_cmd->va) { 5797 if (!stats_cmd->va) {
5798 status = -ENOMEM; 5798 status = -ENOMEM;
5799 goto free_rx_filter; 5799 goto free_rx_filter;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4d673225ed3e..3e5e97186fc4 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -935,16 +935,14 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
935 return -ENOMEM; 935 return -ENOMEM;
936 936
937 /* Allocate descriptors */ 937 /* Allocate descriptors */
938 priv->rxdes = dma_zalloc_coherent(priv->dev, 938 priv->rxdes = dma_alloc_coherent(priv->dev,
939 MAX_RX_QUEUE_ENTRIES * 939 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
940 sizeof(struct ftgmac100_rxdes), 940 &priv->rxdes_dma, GFP_KERNEL);
941 &priv->rxdes_dma, GFP_KERNEL);
942 if (!priv->rxdes) 941 if (!priv->rxdes)
943 return -ENOMEM; 942 return -ENOMEM;
944 priv->txdes = dma_zalloc_coherent(priv->dev, 943 priv->txdes = dma_alloc_coherent(priv->dev,
945 MAX_TX_QUEUE_ENTRIES * 944 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
946 sizeof(struct ftgmac100_txdes), 945 &priv->txdes_dma, GFP_KERNEL);
947 &priv->txdes_dma, GFP_KERNEL);
948 if (!priv->txdes) 946 if (!priv->txdes)
949 return -ENOMEM; 947 return -ENOMEM;
950 948
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 084f24daf2b5..2a0e820526dc 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
734{ 734{
735 int i; 735 int i;
736 736
737 priv->descs = dma_zalloc_coherent(priv->dev, 737 priv->descs = dma_alloc_coherent(priv->dev,
738 sizeof(struct ftmac100_descs), 738 sizeof(struct ftmac100_descs),
739 &priv->descs_dma_addr, 739 &priv->descs_dma_addr, GFP_KERNEL);
740 GFP_KERNEL);
741 if (!priv->descs) 740 if (!priv->descs)
742 return -ENOMEM; 741 return -ENOMEM;
743 742
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index f53090cde041..dfebc30c4841 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2051 bool nonlinear = skb_is_nonlinear(skb); 2051 bool nonlinear = skb_is_nonlinear(skb);
2052 struct rtnl_link_stats64 *percpu_stats; 2052 struct rtnl_link_stats64 *percpu_stats;
2053 struct dpaa_percpu_priv *percpu_priv; 2053 struct dpaa_percpu_priv *percpu_priv;
2054 struct netdev_queue *txq;
2054 struct dpaa_priv *priv; 2055 struct dpaa_priv *priv;
2055 struct qm_fd fd; 2056 struct qm_fd fd;
2056 int offset = 0; 2057 int offset = 0;
@@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2100 if (unlikely(err < 0)) 2101 if (unlikely(err < 0))
2101 goto skb_to_fd_failed; 2102 goto skb_to_fd_failed;
2102 2103
2104 txq = netdev_get_tx_queue(net_dev, queue_mapping);
2105
2106 /* LLTX requires to do our own update of trans_start */
2107 txq->trans_start = jiffies;
2108
2103 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 2109 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2104 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); 2110 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
2105 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2111 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index 809a155eb193..f6d244c663fd 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -9,8 +9,9 @@ config FSL_DPAA2_ETH
9 9
10config FSL_DPAA2_PTP_CLOCK 10config FSL_DPAA2_PTP_CLOCK
11 tristate "Freescale DPAA2 PTP Clock" 11 tristate "Freescale DPAA2 PTP Clock"
12 depends on FSL_DPAA2_ETH && POSIX_TIMERS 12 depends on FSL_DPAA2_ETH
13 select PTP_1588_CLOCK 13 imply PTP_1588_CLOCK
14 default y
14 help 15 help
15 This driver adds support for using the DPAA2 1588 timer module 16 This driver adds support for using the DPAA2 1588 timer module
16 as a PTP clock. 17 as a PTP clock.
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ae0f88bce9aa..697c2427f2b7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
2098#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2098#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2099 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2099 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2100 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2100 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2101static __u32 fec_enet_register_version = 2;
2101static u32 fec_enet_register_offset[] = { 2102static u32 fec_enet_register_offset[] = {
2102 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2103 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2103 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2104 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = {
2128 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2129 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2129}; 2130};
2130#else 2131#else
2132static __u32 fec_enet_register_version = 1;
2131static u32 fec_enet_register_offset[] = { 2133static u32 fec_enet_register_offset[] = {
2132 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2134 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2133 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2135 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
@@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
2149 u32 *buf = (u32 *)regbuf; 2151 u32 *buf = (u32 *)regbuf;
2150 u32 i, off; 2152 u32 i, off;
2151 2153
2154 regs->version = fec_enet_register_version;
2155
2152 memset(buf, 0, regs->len); 2156 memset(buf, 0, regs->len);
2153 2157
2154 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { 2158 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
@@ -3467,7 +3471,7 @@ fec_probe(struct platform_device *pdev)
3467 if (ret) 3471 if (ret)
3468 goto failed_clk_ipg; 3472 goto failed_clk_ipg;
3469 3473
3470 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3474 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
3471 if (!IS_ERR(fep->reg_phy)) { 3475 if (!IS_ERR(fep->reg_phy)) {
3472 ret = regulator_enable(fep->reg_phy); 3476 ret = regulator_enable(fep->reg_phy);
3473 if (ret) { 3477 if (ret) {
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index b90bab72efdb..c1968b3ecec8 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
369 dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, 369 dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
370 DMA_TO_DEVICE); 370 DMA_TO_DEVICE);
371 371
372 dev_kfree_skb_irq(skb); 372 dev_consume_skb_irq(skb);
373 } 373 }
374 spin_unlock(&priv->lock); 374 spin_unlock(&priv->lock);
375 375
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c3d539e209ed..eb3e65e8868f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
1879 u16 i, j; 1879 u16 i, j;
1880 u8 __iomem *bd; 1880 u8 __iomem *bd;
1881 1881
1882 netdev_reset_queue(ugeth->ndev);
1883
1882 ug_info = ugeth->ug_info; 1884 ug_info = ugeth->ug_info;
1883 uf_info = &ug_info->uf_info; 1885 uf_info = &ug_info->uf_info;
1884 1886
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 471805ea363b..e5d853b7b454 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
1006 1006
1007 for (i = 0; i < QUEUE_NUMS; i++) { 1007 for (i = 0; i < QUEUE_NUMS; i++) {
1008 size = priv->pool[i].count * sizeof(struct hix5hd2_desc); 1008 size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
1009 virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, 1009 virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
1010 GFP_KERNEL); 1010 GFP_KERNEL);
1011 if (virt_addr == NULL) 1011 if (virt_addr == NULL)
1012 goto error_free_pool; 1012 goto error_free_pool;
1013 1013
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ad1779fc410e..a78bfafd212c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
147 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); 147 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
148 int i; 148 int i;
149 149
150 vf_cb->mac_cb = NULL;
151
152 kfree(vf_cb);
153
154 for (i = 0; i < handle->q_num; i++) 150 for (i = 0; i < handle->q_num; i++)
155 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; 151 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
152
153 kfree(vf_cb);
156} 154}
157 155
158static int hns_ae_wait_flow_down(struct hnae_handle *handle) 156static int hns_ae_wait_flow_down(struct hnae_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 3b9e74be5fbd..ac55db065f16 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
3081 dsaf_dev = dev_get_drvdata(&pdev->dev); 3081 dsaf_dev = dev_get_drvdata(&pdev->dev);
3082 if (!dsaf_dev) { 3082 if (!dsaf_dev) {
3083 dev_err(&pdev->dev, "dsaf_dev is NULL\n"); 3083 dev_err(&pdev->dev, "dsaf_dev is NULL\n");
3084 put_device(&pdev->dev);
3084 return -ENODEV; 3085 return -ENODEV;
3085 } 3086 }
3086 3087
@@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
3088 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { 3089 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
3089 dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", 3090 dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
3090 dsaf_dev->ae_dev.name); 3091 dsaf_dev->ae_dev.name);
3092 put_device(&pdev->dev);
3091 return -ENODEV; 3093 return -ENODEV;
3092 } 3094 }
3093 3095
@@ -3126,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
3126 dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); 3128 dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
3127 dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); 3129 dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
3128 } 3130 }
3131
3132 put_device(&pdev->dev);
3133
3129 return 0; 3134 return 0;
3130} 3135}
3131EXPORT_SYMBOL(hns_dsaf_roce_reset); 3136EXPORT_SYMBOL(hns_dsaf_roce_reset);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 5748d3f722f6..60e7d7ae3787 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1170,6 +1170,13 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
1170 if (!h->phy_dev) 1170 if (!h->phy_dev)
1171 return 0; 1171 return 0;
1172 1172
1173 ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
1174 linkmode_and(phy_dev->supported, phy_dev->supported, supported);
1175 linkmode_copy(phy_dev->advertising, phy_dev->supported);
1176
1177 if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
1178 phy_dev->autoneg = false;
1179
1173 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { 1180 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
1174 phy_dev->dev_flags = 0; 1181 phy_dev->dev_flags = 0;
1175 1182
@@ -1181,16 +1188,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
1181 if (unlikely(ret)) 1188 if (unlikely(ret))
1182 return -ENODEV; 1189 return -ENODEV;
1183 1190
1184 ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
1185 linkmode_and(phy_dev->supported, phy_dev->supported, supported);
1186 linkmode_copy(phy_dev->advertising, phy_dev->supported);
1187
1188 if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
1189 phy_dev->autoneg = false;
1190
1191 if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
1192 phy_stop(phy_dev);
1193
1194 return 0; 1191 return 0;
1195} 1192}
1196 1193
@@ -2421,6 +2418,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
2421out_notify_fail: 2418out_notify_fail:
2422 (void)cancel_work_sync(&priv->service_task); 2419 (void)cancel_work_sync(&priv->service_task);
2423out_read_prop_fail: 2420out_read_prop_fail:
2421 /* safe for ACPI FW */
2422 of_node_put(to_of_node(priv->fwnode));
2424 free_netdev(ndev); 2423 free_netdev(ndev);
2425 return ret; 2424 return ret;
2426} 2425}
@@ -2450,6 +2449,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
2450 set_bit(NIC_STATE_REMOVING, &priv->state); 2449 set_bit(NIC_STATE_REMOVING, &priv->state);
2451 (void)cancel_work_sync(&priv->service_task); 2450 (void)cancel_work_sync(&priv->service_task);
2452 2451
2452 /* safe for ACPI FW */
2453 of_node_put(to_of_node(priv->fwnode));
2454
2453 free_netdev(ndev); 2455 free_netdev(ndev);
2454 return 0; 2456 return 0;
2455} 2457}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 8e9b95871d30..ce15d2350db9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
1157 */ 1157 */
1158static int hns_nic_nway_reset(struct net_device *netdev) 1158static int hns_nic_nway_reset(struct net_device *netdev)
1159{ 1159{
1160 int ret = 0;
1161 struct phy_device *phy = netdev->phydev; 1160 struct phy_device *phy = netdev->phydev;
1162 1161
1163 if (netif_running(netdev)) { 1162 if (!netif_running(netdev))
1164 /* if autoneg is disabled, don't restart auto-negotiation */ 1163 return 0;
1165 if (phy && phy->autoneg == AUTONEG_ENABLE)
1166 ret = genphy_restart_aneg(phy);
1167 }
1168 1164
1169 return ret; 1165 if (!phy)
1166 return -EOPNOTSUPP;
1167
1168 if (phy->autoneg != AUTONEG_ENABLE)
1169 return -EINVAL;
1170
1171 return genphy_restart_aneg(phy);
1170} 1172}
1171 1173
1172static u32 1174static u32
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 07cd58798083..1bf7a5f116a0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2041,9 +2041,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
2041{ 2041{
2042 int size = ring->desc_num * sizeof(ring->desc[0]); 2042 int size = ring->desc_num * sizeof(ring->desc[0]);
2043 2043
2044 ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, 2044 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2045 &ring->desc_dma_addr, 2045 &ring->desc_dma_addr, GFP_KERNEL);
2046 GFP_KERNEL);
2047 if (!ring->desc) 2046 if (!ring->desc)
2048 return -ENOMEM; 2047 return -ENOMEM;
2049 2048
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 8af0cef5609b..e483a6e730e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
39{ 39{
40 int size = ring->desc_num * sizeof(struct hclge_desc); 40 int size = ring->desc_num * sizeof(struct hclge_desc);
41 41
42 ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), 42 ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
43 size, &ring->desc_dma_addr, 43 &ring->desc_dma_addr, GFP_KERNEL);
44 GFP_KERNEL);
45 if (!ring->desc) 44 if (!ring->desc)
46 return -ENOMEM; 45 return -ENOMEM;
47 46
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d5765c8cf3a3..4e78e8812a04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
115{ 115{
116 int size = ring->desc_num * sizeof(struct hclgevf_desc); 116 int size = ring->desc_num * sizeof(struct hclgevf_desc);
117 117
118 ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), 118 ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
119 size, &ring->desc_dma_addr, 119 &ring->desc_dma_addr, GFP_KERNEL);
120 GFP_KERNEL);
121 if (!ring->desc) 120 if (!ring->desc)
122 return -ENOMEM; 121 return -ENOMEM;
123 122
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 017e08452d8c..baf5cc251f32 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
321 } 321 }
322 322
323 hns_mdio_cmd_write(mdio_dev, is_c45, 323 hns_mdio_cmd_write(mdio_dev, is_c45,
324 MDIO_C45_WRITE_ADDR, phy_id, devad); 324 MDIO_C45_READ, phy_id, devad);
325 } 325 }
326 326
327 /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/ 327 /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index c40603a183df..b4fefb4c3064 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -613,8 +613,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
613 u8 *cmd_vaddr; 613 u8 *cmd_vaddr;
614 int err = 0; 614 int err = 0;
615 615
616 cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, 616 cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
617 &cmd_paddr, GFP_KERNEL); 617 &cmd_paddr, GFP_KERNEL);
618 if (!cmd_vaddr) { 618 if (!cmd_vaddr) {
619 dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); 619 dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
620 return -ENOMEM; 620 return -ENOMEM;
@@ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
663 dma_addr_t node_paddr; 663 dma_addr_t node_paddr;
664 int err; 664 int err;
665 665
666 node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, 666 node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
667 &node_paddr, GFP_KERNEL); 667 GFP_KERNEL);
668 if (!node) { 668 if (!node) {
669 dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); 669 dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
670 return -ENOMEM; 670 return -ENOMEM;
@@ -821,10 +821,10 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
821 if (!chain->cell_ctxt) 821 if (!chain->cell_ctxt)
822 return -ENOMEM; 822 return -ENOMEM;
823 823
824 chain->wb_status = dma_zalloc_coherent(&pdev->dev, 824 chain->wb_status = dma_alloc_coherent(&pdev->dev,
825 sizeof(*chain->wb_status), 825 sizeof(*chain->wb_status),
826 &chain->wb_status_paddr, 826 &chain->wb_status_paddr,
827 GFP_KERNEL); 827 GFP_KERNEL);
828 if (!chain->wb_status) { 828 if (!chain->wb_status) {
829 dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); 829 dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
830 return -ENOMEM; 830 return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 7cb8b9b94726..683e67515016 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -593,10 +593,10 @@ static int alloc_eq_pages(struct hinic_eq *eq)
593 } 593 }
594 594
595 for (pg = 0; pg < eq->num_pages; pg++) { 595 for (pg = 0; pg < eq->num_pages; pg++) {
596 eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, 596 eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
597 eq->page_size, 597 eq->page_size,
598 &eq->dma_addr[pg], 598 &eq->dma_addr[pg],
599 GFP_KERNEL); 599 GFP_KERNEL);
600 if (!eq->virt_addr[pg]) { 600 if (!eq->virt_addr[pg]) {
601 err = -ENOMEM; 601 err = -ENOMEM;
602 goto err_dma_alloc; 602 goto err_dma_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 8e5897669a3a..a322a22d9357 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -355,9 +355,9 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
355 goto err_sq_db; 355 goto err_sq_db;
356 } 356 }
357 357
358 ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), 358 ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
359 &func_to_io->ci_dma_base, 359 &func_to_io->ci_dma_base,
360 GFP_KERNEL); 360 GFP_KERNEL);
361 if (!ci_addr_base) { 361 if (!ci_addr_base) {
362 dev_err(&pdev->dev, "Failed to allocate CI area\n"); 362 dev_err(&pdev->dev, "Failed to allocate CI area\n");
363 err = -ENOMEM; 363 err = -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index bbf9bdd0ee3e..d62cf509646a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -336,9 +336,9 @@ static int alloc_rq_cqe(struct hinic_rq *rq)
336 goto err_cqe_dma_arr_alloc; 336 goto err_cqe_dma_arr_alloc;
337 337
338 for (i = 0; i < wq->q_depth; i++) { 338 for (i = 0; i < wq->q_depth; i++) {
339 rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, 339 rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
340 sizeof(*rq->cqe[i]), 340 sizeof(*rq->cqe[i]),
341 &rq->cqe_dma[i], GFP_KERNEL); 341 &rq->cqe_dma[i], GFP_KERNEL);
342 if (!rq->cqe[i]) 342 if (!rq->cqe[i])
343 goto err_cqe_alloc; 343 goto err_cqe_alloc;
344 } 344 }
@@ -415,8 +415,8 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
415 415
416 /* HW requirements: Must be at least 32 bit */ 416 /* HW requirements: Must be at least 32 bit */
417 pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); 417 pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
418 rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, 418 rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
419 &rq->pi_dma_addr, GFP_KERNEL); 419 &rq->pi_dma_addr, GFP_KERNEL);
420 if (!rq->pi_virt_addr) { 420 if (!rq->pi_virt_addr) {
421 dev_err(&pdev->dev, "Failed to allocate PI address\n"); 421 dev_err(&pdev->dev, "Failed to allocate PI address\n");
422 err = -ENOMEM; 422 err = -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 1dfa7eb05c10..cb66e7024659 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -114,8 +114,8 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
114 struct pci_dev *pdev = hwif->pdev; 114 struct pci_dev *pdev = hwif->pdev;
115 dma_addr_t dma_addr; 115 dma_addr_t dma_addr;
116 116
117 *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, 117 *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
118 GFP_KERNEL); 118 GFP_KERNEL);
119 if (!*vaddr) { 119 if (!*vaddr) {
120 dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); 120 dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
121 return -ENOMEM; 121 return -ENOMEM;
@@ -482,8 +482,8 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
482 u64 *paddr = &wq->block_vaddr[i]; 482 u64 *paddr = &wq->block_vaddr[i];
483 dma_addr_t dma_addr; 483 dma_addr_t dma_addr;
484 484
485 *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, 485 *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
486 &dma_addr, GFP_KERNEL); 486 &dma_addr, GFP_KERNEL);
487 if (!*vaddr) { 487 if (!*vaddr) {
488 dev_err(&pdev->dev, "Failed to allocate wq page\n"); 488 dev_err(&pdev->dev, "Failed to allocate wq page\n");
489 goto err_alloc_wq_pages; 489 goto err_alloc_wq_pages;
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index d719668a6684..92929750f832 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1310,7 +1310,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
1310 dev->stats.tx_aborted_errors++; 1310 dev->stats.tx_aborted_errors++;
1311 } 1311 }
1312 1312
1313 dev_kfree_skb_irq(skb); 1313 dev_consume_skb_irq(skb);
1314 1314
1315 tx_cmd->cmd.command = 0; /* Mark free */ 1315 tx_cmd->cmd.command = 0; /* Mark free */
1316 break; 1316 break;
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fff09dcf9e34..787d5aca5278 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -636,8 +636,8 @@ static int mal_probe(struct platform_device *ofdev)
636 bd_size = sizeof(struct mal_descriptor) * 636 bd_size = sizeof(struct mal_descriptor) *
637 (NUM_TX_BUFF * mal->num_tx_chans + 637 (NUM_TX_BUFF * mal->num_tx_chans +
638 NUM_RX_BUFF * mal->num_rx_chans); 638 NUM_RX_BUFF * mal->num_rx_chans);
639 mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, 639 mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
640 GFP_KERNEL); 640 GFP_KERNEL);
641 if (mal->bd_virt == NULL) { 641 if (mal->bd_virt == NULL) {
642 err = -ENOMEM; 642 err = -ENOMEM;
643 goto fail_unmap; 643 goto fail_unmap;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 098d8764c0ea..dd71d5db7274 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1313,7 +1313,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
1313 unsigned long lpar_rc; 1313 unsigned long lpar_rc;
1314 u16 mss = 0; 1314 u16 mss = 0;
1315 1315
1316restart_poll:
1317 while (frames_processed < budget) { 1316 while (frames_processed < budget) {
1318 if (!ibmveth_rxq_pending_buffer(adapter)) 1317 if (!ibmveth_rxq_pending_buffer(adapter))
1319 break; 1318 break;
@@ -1401,7 +1400,6 @@ restart_poll:
1401 napi_reschedule(napi)) { 1400 napi_reschedule(napi)) {
1402 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1401 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1403 VIO_IRQ_DISABLE); 1402 VIO_IRQ_DISABLE);
1404 goto restart_poll;
1405 } 1403 }
1406 } 1404 }
1407 1405
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 31fb76ee9d82..a1246e89aad4 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -159,7 +159,7 @@ config IXGBE
159 tristate "Intel(R) 10GbE PCI Express adapters support" 159 tristate "Intel(R) 10GbE PCI Express adapters support"
160 depends on PCI 160 depends on PCI
161 select MDIO 161 select MDIO
162 select MDIO_DEVICE 162 select PHYLIB
163 imply PTP_1588_CLOCK 163 imply PTP_1588_CLOCK
164 ---help--- 164 ---help---
165 This driver supports Intel(R) 10GbE PCI Express family of 165 This driver supports Intel(R) 10GbE PCI Express family of
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2569a168334c..a41008523c98 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
993 993
994 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 994 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
995 txdr->size = ALIGN(txdr->size, 4096); 995 txdr->size = ALIGN(txdr->size, 4096);
996 txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 996 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
997 GFP_KERNEL); 997 GFP_KERNEL);
998 if (!txdr->desc) { 998 if (!txdr->desc) {
999 ret_val = 2; 999 ret_val = 2;
1000 goto err_nomem; 1000 goto err_nomem;
@@ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1051 } 1051 }
1052 1052
1053 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1053 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1054 rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1054 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1055 GFP_KERNEL); 1055 GFP_KERNEL);
1056 if (!rxdr->desc) { 1056 if (!rxdr->desc) {
1057 ret_val = 6; 1057 ret_val = 6;
1058 goto err_nomem; 1058 goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 308c006cb41d..189f231075c2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2305{ 2305{
2306 struct pci_dev *pdev = adapter->pdev; 2306 struct pci_dev *pdev = adapter->pdev;
2307 2307
2308 ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, 2308 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2309 GFP_KERNEL); 2309 GFP_KERNEL);
2310 if (!ring->desc) 2310 if (!ring->desc)
2311 return -ENOMEM; 2311 return -ENOMEM;
2312 2312
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4d40878e395a..e4ff531db14a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
109 struct i40e_pf *pf = (struct i40e_pf *)hw->back; 109 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
110 110
111 mem->size = ALIGN(size, alignment); 111 mem->size = ALIGN(size, alignment);
112 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, 112 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
113 &mem->pa, GFP_KERNEL); 113 GFP_KERNEL);
114 if (!mem->va) 114 if (!mem->va)
115 return -ENOMEM; 115 return -ENOMEM;
116 116
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
3289 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : 3289 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3290 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 3290 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3291 if (!ok) { 3291 if (!ok) {
3292 /* Log this in case the user has forgotten to give the kernel
3293 * any buffers, even later in the application.
3294 */
3292 dev_info(&vsi->back->pdev->dev, 3295 dev_info(&vsi->back->pdev->dev,
3293 "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", 3296 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3294 ring->xsk_umem ? "UMEM enabled " : "", 3297 ring->xsk_umem ? "UMEM enabled " : "",
3295 ring->queue_index, pf_q); 3298 ring->queue_index, pf_q);
3296 } 3299 }
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
6725 6728
6726 for (i = 0; i < vsi->num_queue_pairs; i++) { 6729 for (i = 0; i < vsi->num_queue_pairs; i++) {
6727 i40e_clean_tx_ring(vsi->tx_rings[i]); 6730 i40e_clean_tx_ring(vsi->tx_rings[i]);
6728 if (i40e_enabled_xdp_vsi(vsi)) 6731 if (i40e_enabled_xdp_vsi(vsi)) {
6732 /* Make sure that in-progress ndo_xdp_xmit
6733 * calls are completed.
6734 */
6735 synchronize_rcu();
6729 i40e_clean_tx_ring(vsi->xdp_rings[i]); 6736 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6737 }
6730 i40e_clean_rx_ring(vsi->rx_rings[i]); 6738 i40e_clean_rx_ring(vsi->rx_rings[i]);
6731 } 6739 }
6732 6740
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
11895 if (old_prog) 11903 if (old_prog)
11896 bpf_prog_put(old_prog); 11904 bpf_prog_put(old_prog);
11897 11905
11906 /* Kick start the NAPI context if there is an AF_XDP socket open
11907 * on that queue id. This so that receiving will start.
11908 */
11909 if (need_reset && prog)
11910 for (i = 0; i < vsi->num_queue_pairs; i++)
11911 if (vsi->xdp_rings[i]->xsk_umem)
11912 (void)i40e_xsk_async_xmit(vsi->netdev, i);
11913
11898 return 0; 11914 return 0;
11899} 11915}
11900 11916
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
11955static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) 11971static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
11956{ 11972{
11957 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); 11973 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
11958 if (i40e_enabled_xdp_vsi(vsi)) 11974 if (i40e_enabled_xdp_vsi(vsi)) {
11975 /* Make sure that in-progress ndo_xdp_xmit calls are
11976 * completed.
11977 */
11978 synchronize_rcu();
11959 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); 11979 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
11980 }
11960 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); 11981 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
11961} 11982}
11962 11983
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a7e14e98889f..6c97667d20ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3709 struct i40e_netdev_priv *np = netdev_priv(dev); 3709 struct i40e_netdev_priv *np = netdev_priv(dev);
3710 unsigned int queue_index = smp_processor_id(); 3710 unsigned int queue_index = smp_processor_id();
3711 struct i40e_vsi *vsi = np->vsi; 3711 struct i40e_vsi *vsi = np->vsi;
3712 struct i40e_pf *pf = vsi->back;
3712 struct i40e_ring *xdp_ring; 3713 struct i40e_ring *xdp_ring;
3713 int drops = 0; 3714 int drops = 0;
3714 int i; 3715 int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3716 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 3717 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3717 return -ENETDOWN; 3718 return -ENETDOWN;
3718 3719
3719 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) 3720 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
3721 test_bit(__I40E_CONFIG_BUSY, pf->state))
3720 return -ENXIO; 3722 return -ENXIO;
3721 3723
3722 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 3724 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 870cf654e436..3827f16e6923 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
183 err = i40e_queue_pair_enable(vsi, qid); 183 err = i40e_queue_pair_enable(vsi, qid);
184 if (err) 184 if (err)
185 return err; 185 return err;
186
187 /* Kick start the NAPI context so that receiving will start */
188 err = i40e_xsk_async_xmit(vsi->netdev, qid);
189 if (err)
190 return err;
186 } 191 }
187 192
188 return 0; 193 return 0;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index fe1592ae8769..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -515,7 +515,7 @@ struct igb_adapter {
515 /* OS defined structs */ 515 /* OS defined structs */
516 struct pci_dev *pdev; 516 struct pci_dev *pdev;
517 517
518 struct mutex stats64_lock; 518 spinlock_t stats64_lock;
519 struct rtnl_link_stats64 stats64; 519 struct rtnl_link_stats64 stats64;
520 520
521 /* structs defined in e1000_hw.h */ 521 /* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7426060b678f..c57671068245 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2295 int i, j; 2295 int i, j;
2296 char *p; 2296 char *p;
2297 2297
2298 mutex_lock(&adapter->stats64_lock); 2298 spin_lock(&adapter->stats64_lock);
2299 igb_update_stats(adapter); 2299 igb_update_stats(adapter);
2300 2300
2301 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 2301 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
@@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2338 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 2338 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
2339 i += IGB_RX_QUEUE_STATS_LEN; 2339 i += IGB_RX_QUEUE_STATS_LEN;
2340 } 2340 }
2341 mutex_unlock(&adapter->stats64_lock); 2341 spin_unlock(&adapter->stats64_lock);
2342} 2342}
2343 2343
2344static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 2344static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 87bdf1604ae2..7137e7f9c7f3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2203,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter)
2203 del_timer_sync(&adapter->phy_info_timer); 2203 del_timer_sync(&adapter->phy_info_timer);
2204 2204
2205 /* record the stats before reset*/ 2205 /* record the stats before reset*/
2206 mutex_lock(&adapter->stats64_lock); 2206 spin_lock(&adapter->stats64_lock);
2207 igb_update_stats(adapter); 2207 igb_update_stats(adapter);
2208 mutex_unlock(&adapter->stats64_lock); 2208 spin_unlock(&adapter->stats64_lock);
2209 2209
2210 adapter->link_speed = 0; 2210 adapter->link_speed = 0;
2211 adapter->link_duplex = 0; 2211 adapter->link_duplex = 0;
@@ -3840,7 +3840,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
3840 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3840 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3841 3841
3842 spin_lock_init(&adapter->nfc_lock); 3842 spin_lock_init(&adapter->nfc_lock);
3843 mutex_init(&adapter->stats64_lock); 3843 spin_lock_init(&adapter->stats64_lock);
3844#ifdef CONFIG_PCI_IOV 3844#ifdef CONFIG_PCI_IOV
3845 switch (hw->mac.type) { 3845 switch (hw->mac.type) {
3846 case e1000_82576: 3846 case e1000_82576:
@@ -5406,9 +5406,9 @@ no_wait:
5406 } 5406 }
5407 } 5407 }
5408 5408
5409 mutex_lock(&adapter->stats64_lock); 5409 spin_lock(&adapter->stats64_lock);
5410 igb_update_stats(adapter); 5410 igb_update_stats(adapter);
5411 mutex_unlock(&adapter->stats64_lock); 5411 spin_unlock(&adapter->stats64_lock);
5412 5412
5413 for (i = 0; i < adapter->num_tx_queues; i++) { 5413 for (i = 0; i < adapter->num_tx_queues; i++) {
5414 struct igb_ring *tx_ring = adapter->tx_ring[i]; 5414 struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -6235,10 +6235,10 @@ static void igb_get_stats64(struct net_device *netdev,
6235{ 6235{
6236 struct igb_adapter *adapter = netdev_priv(netdev); 6236 struct igb_adapter *adapter = netdev_priv(netdev);
6237 6237
6238 mutex_lock(&adapter->stats64_lock); 6238 spin_lock(&adapter->stats64_lock);
6239 igb_update_stats(adapter); 6239 igb_update_stats(adapter);
6240 memcpy(stats, &adapter->stats64, sizeof(*stats)); 6240 memcpy(stats, &adapter->stats64, sizeof(*stats));
6241 mutex_unlock(&adapter->stats64_lock); 6241 spin_unlock(&adapter->stats64_lock);
6242} 6242}
6243 6243
6244/** 6244/**
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1d4d1686909a..e5ac2d3fd816 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
680 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); 680 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
681 txdr->size = ALIGN(txdr->size, 4096); 681 txdr->size = ALIGN(txdr->size, 4096);
682 682
683 txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 683 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
684 GFP_KERNEL); 684 GFP_KERNEL);
685 if (!txdr->desc) { 685 if (!txdr->desc) {
686 vfree(txdr->buffer_info); 686 vfree(txdr->buffer_info);
687 return -ENOMEM; 687 return -ENOMEM;
@@ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
763 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); 763 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
764 rxdr->size = ALIGN(rxdr->size, 4096); 764 rxdr->size = ALIGN(rxdr->size, 4096);
765 765
766 rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 766 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
767 GFP_KERNEL); 767 GFP_KERNEL);
768 768
769 if (!rxdr->desc) { 769 if (!rxdr->desc) {
770 vfree(rxdr->buffer_info); 770 vfree(rxdr->buffer_info);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index daff8183534b..cb35d8202572 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3953 else 3953 else
3954 mrqc = IXGBE_MRQC_VMDQRSS64EN; 3954 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3955 3955
3956 /* Enable L3/L4 for Tx Switched packets */ 3956 /* Enable L3/L4 for Tx Switched packets only for X550,
3957 mrqc |= IXGBE_MRQC_L3L4TXSWEN; 3957 * older devices do not support this feature
3958 */
3959 if (hw->mac.type >= ixgbe_mac_X550)
3960 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3958 } else { 3961 } else {
3959 if (tcs > 4) 3962 if (tcs > 4)
3960 mrqc = IXGBE_MRQC_RTRSS8TCEN; 3963 mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10225 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 10228 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10226 struct ixgbe_adapter *adapter = netdev_priv(dev); 10229 struct ixgbe_adapter *adapter = netdev_priv(dev);
10227 struct bpf_prog *old_prog; 10230 struct bpf_prog *old_prog;
10231 bool need_reset;
10228 10232
10229 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 10233 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10230 return -EINVAL; 10234 return -EINVAL;
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10247 return -ENOMEM; 10251 return -ENOMEM;
10248 10252
10249 old_prog = xchg(&adapter->xdp_prog, prog); 10253 old_prog = xchg(&adapter->xdp_prog, prog);
10254 need_reset = (!!prog != !!old_prog);
10250 10255
10251 /* If transitioning XDP modes reconfigure rings */ 10256 /* If transitioning XDP modes reconfigure rings */
10252 if (!!prog != !!old_prog) { 10257 if (need_reset) {
10253 int err = ixgbe_setup_tc(dev, adapter->hw_tcs); 10258 int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10254 10259
10255 if (err) { 10260 if (err) {
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10265 if (old_prog) 10270 if (old_prog)
10266 bpf_prog_put(old_prog); 10271 bpf_prog_put(old_prog);
10267 10272
10273 /* Kick start the NAPI context if there is an AF_XDP socket open
10274 * on that queue id. This so that receiving will start.
10275 */
10276 if (need_reset && prog)
10277 for (i = 0; i < adapter->num_rx_queues; i++)
10278 if (adapter->xdp_ring[i]->xsk_umem)
10279 (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
10280
10268 return 0; 10281 return 0;
10269} 10282}
10270 10283
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 65c3e2c979d4..36a8879536a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
144 ixgbe_txrx_ring_disable(adapter, qid); 144 ixgbe_txrx_ring_disable(adapter, qid);
145 145
146 err = ixgbe_add_xsk_umem(adapter, umem, qid); 146 err = ixgbe_add_xsk_umem(adapter, umem, qid);
147 if (err)
148 return err;
147 149
148 if (if_running) 150 if (if_running) {
149 ixgbe_txrx_ring_enable(adapter, qid); 151 ixgbe_txrx_ring_enable(adapter, qid);
150 152
151 return err; 153 /* Kick start the NAPI context so that receiving will start */
154 err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
155 if (err)
156 return err;
157 }
158
159 return 0;
152} 160}
153 161
154static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) 162static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
634 dma_addr_t dma; 642 dma_addr_t dma;
635 643
636 while (budget-- > 0) { 644 while (budget-- > 0) {
637 if (unlikely(!ixgbe_desc_unused(xdp_ring))) { 645 if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
646 !netif_carrier_ok(xdp_ring->netdev)) {
638 work_done = false; 647 work_done = false;
639 break; 648 break;
640 } 649 }
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2f427271a793..292a668ce88e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2879 2879
2880 ret = mv643xx_eth_shared_of_probe(pdev); 2880 ret = mv643xx_eth_shared_of_probe(pdev);
2881 if (ret) 2881 if (ret)
2882 return ret; 2882 goto err_put_clk;
2883 pd = dev_get_platdata(&pdev->dev); 2883 pd = dev_get_platdata(&pdev->dev);
2884 2884
2885 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 2885 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2887 infer_hw_params(msp); 2887 infer_hw_params(msp);
2888 2888
2889 return 0; 2889 return 0;
2890
2891err_put_clk:
2892 if (!IS_ERR(msp->clk))
2893 clk_disable_unprepare(msp->clk);
2894 return ret;
2890} 2895}
2891 2896
2892static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2897static int mv643xx_eth_shared_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d4568eb2297..8433fb9c3eee 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2146,7 +2146,7 @@ err_drop_frame:
2146 if (unlikely(!skb)) 2146 if (unlikely(!skb))
2147 goto err_drop_frame_ret_pool; 2147 goto err_drop_frame_ret_pool;
2148 2148
2149 dma_sync_single_range_for_cpu(dev->dev.parent, 2149 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2150 rx_desc->buf_phys_addr, 2150 rx_desc->buf_phys_addr,
2151 MVNETA_MH_SIZE + NET_SKB_PAD, 2151 MVNETA_MH_SIZE + NET_SKB_PAD,
2152 rx_bytes, 2152 rx_bytes,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index e0875476a780..16066c2d5b3a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2044,9 +2044,9 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2044 u32 txq_dma; 2044 u32 txq_dma;
2045 2045
2046 /* Allocate memory for TX descriptors */ 2046 /* Allocate memory for TX descriptors */
2047 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, 2047 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2048 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2048 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2049 &aggr_txq->descs_dma, GFP_KERNEL); 2049 &aggr_txq->descs_dma, GFP_KERNEL);
2050 if (!aggr_txq->descs) 2050 if (!aggr_txq->descs)
2051 return -ENOMEM; 2051 return -ENOMEM;
2052 2052
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 742f0c1f60df..6d55e3d0b7ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -825,7 +825,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
825 if (!cgx->cgx_cmd_workq) { 825 if (!cgx->cgx_cmd_workq) {
826 dev_err(dev, "alloc workqueue failed for cgx cmd"); 826 dev_err(dev, "alloc workqueue failed for cgx cmd");
827 err = -ENOMEM; 827 err = -ENOMEM;
828 goto err_release_regions; 828 goto err_free_irq_vectors;
829 } 829 }
830 830
831 list_add(&cgx->cgx_list, &cgx_list); 831 list_add(&cgx->cgx_list, &cgx_list);
@@ -841,6 +841,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
841err_release_lmac: 841err_release_lmac:
842 cgx_lmac_exit(cgx); 842 cgx_lmac_exit(cgx);
843 list_del(&cgx->cgx_list); 843 list_del(&cgx->cgx_list);
844err_free_irq_vectors:
845 pci_free_irq_vectors(pdev);
844err_release_regions: 846err_release_regions:
845 pci_release_regions(pdev); 847 pci_release_regions(pdev);
846err_disable_device: 848err_disable_device:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index ec50a21c5aaf..e332e82fc066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -64,7 +64,7 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q,
64 64
65 qmem->entry_sz = entry_sz; 65 qmem->entry_sz = entry_sz;
66 qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN; 66 qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
67 qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz, 67 qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
68 &qmem->iova, GFP_KERNEL); 68 &qmem->iova, GFP_KERNEL);
69 if (!qmem->base) 69 if (!qmem->base)
70 return -ENOMEM; 70 return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 0bd4351b2a49..f8a6d6e3cb7a 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -557,9 +557,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
557 * table is full. 557 * table is full.
558 */ 558 */
559 if (!pep->htpr) { 559 if (!pep->htpr) {
560 pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, 560 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
561 HASH_ADDR_TABLE_SIZE, 561 HASH_ADDR_TABLE_SIZE,
562 &pep->htpr_dma, GFP_KERNEL); 562 &pep->htpr_dma, GFP_KERNEL);
563 if (!pep->htpr) 563 if (!pep->htpr)
564 return -ENOMEM; 564 return -ENOMEM;
565 } else { 565 } else {
@@ -1044,9 +1044,9 @@ static int rxq_init(struct net_device *dev)
1044 pep->rx_desc_count = 0; 1044 pep->rx_desc_count = 0;
1045 size = pep->rx_ring_size * sizeof(struct rx_desc); 1045 size = pep->rx_ring_size * sizeof(struct rx_desc);
1046 pep->rx_desc_area_size = size; 1046 pep->rx_desc_area_size = size;
1047 pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, 1047 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1048 &pep->rx_desc_dma, 1048 &pep->rx_desc_dma,
1049 GFP_KERNEL); 1049 GFP_KERNEL);
1050 if (!pep->p_rx_desc_area) 1050 if (!pep->p_rx_desc_area)
1051 goto out; 1051 goto out;
1052 1052
@@ -1103,9 +1103,9 @@ static int txq_init(struct net_device *dev)
1103 pep->tx_desc_count = 0; 1103 pep->tx_desc_count = 0;
1104 size = pep->tx_ring_size * sizeof(struct tx_desc); 1104 size = pep->tx_ring_size * sizeof(struct tx_desc);
1105 pep->tx_desc_area_size = size; 1105 pep->tx_desc_area_size = size;
1106 pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, 1106 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1107 &pep->tx_desc_dma, 1107 &pep->tx_desc_dma,
1108 GFP_KERNEL); 1108 GFP_KERNEL);
1109 if (!pep->p_tx_desc_area) 1109 if (!pep->p_tx_desc_area)
1110 goto out; 1110 goto out;
1111 /* Initialize the next_desc_ptr links in the Tx descriptors ring */ 1111 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 04fd1f135011..654ac534b10e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
152 memset(p, 0, regs->len); 152 memset(p, 0, regs->len);
153 memcpy_fromio(p, io, B3_RAM_ADDR); 153 memcpy_fromio(p, io, B3_RAM_ADDR);
154 154
155 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 155 if (regs->len > B3_RI_WTO_R1) {
156 regs->len - B3_RI_WTO_R1); 156 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
157 regs->len - B3_RI_WTO_R1);
158 }
157} 159}
158 160
159/* Wake on Lan only supported on Yukon chips with rev 1 or above */ 161/* Wake on Lan only supported on Yukon chips with rev 1 or above */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f3a5fa84860f..57727fe1501e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5073 INIT_WORK(&hw->restart_work, sky2_restart); 5073 INIT_WORK(&hw->restart_work, sky2_restart);
5074 5074
5075 pci_set_drvdata(pdev, hw); 5075 pci_set_drvdata(pdev, hw);
5076 pdev->d3_delay = 200; 5076 pdev->d3_delay = 300;
5077 5077
5078 return 0; 5078 return 0;
5079 5079
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 399f565dd85a..49f926b7a91c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -258,11 +258,6 @@ static void mtk_phy_link_adjust(struct net_device *dev)
258 258
259 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); 259 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
260 260
261 if (dev->phydev->link)
262 netif_carrier_on(dev);
263 else
264 netif_carrier_off(dev);
265
266 if (!of_phy_is_fixed_link(mac->of_node)) 261 if (!of_phy_is_fixed_link(mac->of_node))
267 phy_print_status(dev->phydev); 262 phy_print_status(dev->phydev);
268} 263}
@@ -347,17 +342,6 @@ static int mtk_phy_connect(struct net_device *dev)
347 if (mtk_phy_connect_node(eth, mac, np)) 342 if (mtk_phy_connect_node(eth, mac, np))
348 goto err_phy; 343 goto err_phy;
349 344
350 dev->phydev->autoneg = AUTONEG_ENABLE;
351 dev->phydev->speed = 0;
352 dev->phydev->duplex = 0;
353
354 phy_set_max_speed(dev->phydev, SPEED_1000);
355 phy_support_asym_pause(dev->phydev);
356 linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
357 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
358 dev->phydev->advertising);
359 phy_start_aneg(dev->phydev);
360
361 of_node_put(np); 345 of_node_put(np);
362 346
363 return 0; 347 return 0;
@@ -598,10 +582,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
598 dma_addr_t dma_addr; 582 dma_addr_t dma_addr;
599 int i; 583 int i;
600 584
601 eth->scratch_ring = dma_zalloc_coherent(eth->dev, 585 eth->scratch_ring = dma_alloc_coherent(eth->dev,
602 cnt * sizeof(struct mtk_tx_dma), 586 cnt * sizeof(struct mtk_tx_dma),
603 &eth->phy_scratch_ring, 587 &eth->phy_scratch_ring,
604 GFP_ATOMIC); 588 GFP_ATOMIC);
605 if (unlikely(!eth->scratch_ring)) 589 if (unlikely(!eth->scratch_ring))
606 return -ENOMEM; 590 return -ENOMEM;
607 591
@@ -1213,8 +1197,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
1213 if (!ring->buf) 1197 if (!ring->buf)
1214 goto no_tx_mem; 1198 goto no_tx_mem;
1215 1199
1216 ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, 1200 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1217 &ring->phys, GFP_ATOMIC); 1201 &ring->phys, GFP_ATOMIC);
1218 if (!ring->dma) 1202 if (!ring->dma)
1219 goto no_tx_mem; 1203 goto no_tx_mem;
1220 1204
@@ -1310,9 +1294,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1310 return -ENOMEM; 1294 return -ENOMEM;
1311 } 1295 }
1312 1296
1313 ring->dma = dma_zalloc_coherent(eth->dev, 1297 ring->dma = dma_alloc_coherent(eth->dev,
1314 rx_dma_size * sizeof(*ring->dma), 1298 rx_dma_size * sizeof(*ring->dma),
1315 &ring->phys, GFP_ATOMIC); 1299 &ring->phys, GFP_ATOMIC);
1316 if (!ring->dma) 1300 if (!ring->dma)
1317 return -ENOMEM; 1301 return -ENOMEM;
1318 1302
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 9af34e03892c..dbc483e4a2ef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -584,8 +584,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
584 buf->npages = 1; 584 buf->npages = 1;
585 buf->page_shift = get_order(size) + PAGE_SHIFT; 585 buf->page_shift = get_order(size) + PAGE_SHIFT;
586 buf->direct.buf = 586 buf->direct.buf =
587 dma_zalloc_coherent(&dev->persist->pdev->dev, 587 dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
588 size, &t, GFP_KERNEL); 588 GFP_KERNEL);
589 if (!buf->direct.buf) 589 if (!buf->direct.buf)
590 return -ENOMEM; 590 return -ENOMEM;
591 591
@@ -624,8 +624,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
624 624
625 for (i = 0; i < buf->nbufs; ++i) { 625 for (i = 0; i < buf->nbufs; ++i) {
626 buf->page_list[i].buf = 626 buf->page_list[i].buf =
627 dma_zalloc_coherent(&dev->persist->pdev->dev, 627 dma_alloc_coherent(&dev->persist->pdev->dev,
628 PAGE_SIZE, &t, GFP_KERNEL); 628 PAGE_SIZE, &t, GFP_KERNEL);
629 if (!buf->page_list[i].buf) 629 if (!buf->page_list[i].buf)
630 goto err_free; 630 goto err_free;
631 631
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index db909b6069b5..65f8a4b6ed0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -306,14 +306,16 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
306 306
307 if (entries_per_copy < entries) { 307 if (entries_per_copy < entries) {
308 for (i = 0; i < entries / entries_per_copy; i++) { 308 for (i = 0; i < entries / entries_per_copy; i++) {
309 err = copy_to_user(buf, init_ents, PAGE_SIZE); 309 err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ?
310 -EFAULT : 0;
310 if (err) 311 if (err)
311 goto out; 312 goto out;
312 313
313 buf += PAGE_SIZE; 314 buf += PAGE_SIZE;
314 } 315 }
315 } else { 316 } else {
316 err = copy_to_user(buf, init_ents, entries * cqe_size); 317 err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
318 -EFAULT : 0;
317 } 319 }
318 320
319out: 321out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6b88881b8e35..c1438ae52a11 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3360 dev->addr_len = ETH_ALEN; 3360 dev->addr_len = ETH_ALEN;
3361 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); 3361 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
3362 if (!is_valid_ether_addr(dev->dev_addr)) { 3362 if (!is_valid_ether_addr(dev->dev_addr)) {
3363 en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", 3363 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
3364 priv->port, dev->dev_addr); 3364 priv->port, dev->dev_addr);
3365 err = -EINVAL; 3365 err = -EINVAL;
3366 goto out; 3366 goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9a0881cb7f51..6c01314e87b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
617} 617}
618#endif 618#endif
619 619
620#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
621
620/* We reach this function only after checking that any of 622/* We reach this function only after checking that any of
621 * the (IPv4 | IPv6) bits are set in cqe->status. 623 * the (IPv4 | IPv6) bits are set in cqe->status.
622 */ 624 */
@@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
624 netdev_features_t dev_features) 626 netdev_features_t dev_features)
625{ 627{
626 __wsum hw_checksum = 0; 628 __wsum hw_checksum = 0;
629 void *hdr;
630
631 /* CQE csum doesn't cover padding octets in short ethernet
632 * frames. And the pad field is appended prior to calculating
633 * and appending the FCS field.
634 *
635 * Detecting these padded frames requires to verify and parse
636 * IP headers, so we simply force all those small frames to skip
637 * checksum complete.
638 */
639 if (short_frame(skb->len))
640 return -EINVAL;
627 641
628 void *hdr = (u8 *)va + sizeof(struct ethhdr); 642 hdr = (u8 *)va + sizeof(struct ethhdr);
629
630 hw_checksum = csum_unfold((__force __sum16)cqe->checksum); 643 hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
631 644
632 if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && 645 if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
@@ -819,6 +832,11 @@ xdp_drop_no_cnt:
819 skb_record_rx_queue(skb, cq_ring); 832 skb_record_rx_queue(skb, cq_ring);
820 833
821 if (likely(dev->features & NETIF_F_RXCSUM)) { 834 if (likely(dev->features & NETIF_F_RXCSUM)) {
835 /* TODO: For IP non TCP/UDP packets when csum complete is
836 * not an option (not supported or any other reason) we can
837 * actually check cqe IPOK status bit and report
838 * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
839 */
822 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | 840 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
823 MLX4_CQE_STATUS_UDP)) && 841 MLX4_CQE_STATUS_UDP)) &&
824 (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && 842 (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7df728f1e5b5..6e501af0e532 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2067,9 +2067,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2067{ 2067{
2068 struct mlx4_cmd_mailbox *mailbox; 2068 struct mlx4_cmd_mailbox *mailbox;
2069 __be32 *outbox; 2069 __be32 *outbox;
2070 u64 qword_field;
2070 u32 dword_field; 2071 u32 dword_field;
2071 int err; 2072 u16 word_field;
2072 u8 byte_field; 2073 u8 byte_field;
2074 int err;
2073 static const u8 a0_dmfs_query_hw_steering[] = { 2075 static const u8 a0_dmfs_query_hw_steering[] = {
2074 [0] = MLX4_STEERING_DMFS_A0_DEFAULT, 2076 [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
2075 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, 2077 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2097,19 +2099,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2097 2099
2098 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 2100 /* QPC/EEC/CQC/EQC/RDMARC attributes */
2099 2101
2100 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); 2102 MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
2101 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); 2103 param->qpc_base = qword_field & ~((u64)0x1f);
2102 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); 2104 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
2103 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); 2105 param->log_num_qps = byte_field & 0x1f;
2104 MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); 2106 MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
2105 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); 2107 param->srqc_base = qword_field & ~((u64)0x1f);
2106 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); 2108 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
2107 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); 2109 param->log_num_srqs = byte_field & 0x1f;
2108 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); 2110 MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
2109 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); 2111 param->cqc_base = qword_field & ~((u64)0x1f);
2110 MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); 2112 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
2111 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 2113 param->log_num_cqs = byte_field & 0x1f;
2112 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 2114 MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
2115 param->altc_base = qword_field;
2116 MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
2117 param->auxc_base = qword_field;
2118 MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
2119 param->eqc_base = qword_field & ~((u64)0x1f);
2120 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
2121 param->log_num_eqs = byte_field & 0x1f;
2122 MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
2123 param->num_sys_eqs = word_field & 0xfff;
2124 MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
2125 param->rdmarc_base = qword_field & ~((u64)0x1f);
2126 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
2127 param->log_rd_per_qp = byte_field & 0x7;
2113 2128
2114 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); 2129 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
2115 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { 2130 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2128,22 +2143,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2128 /* steering attributes */ 2143 /* steering attributes */
2129 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2144 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2130 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 2145 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
2131 MLX4_GET(param->log_mc_entry_sz, outbox, 2146 MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
2132 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 2147 param->log_mc_entry_sz = byte_field & 0x1f;
2133 MLX4_GET(param->log_mc_table_sz, outbox, 2148 MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
2134 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 2149 param->log_mc_table_sz = byte_field & 0x1f;
2135 MLX4_GET(byte_field, outbox, 2150 MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
2136 INIT_HCA_FS_A0_OFFSET);
2137 param->dmfs_high_steer_mode = 2151 param->dmfs_high_steer_mode =
2138 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; 2152 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
2139 } else { 2153 } else {
2140 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); 2154 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
2141 MLX4_GET(param->log_mc_entry_sz, outbox, 2155 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
2142 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2156 param->log_mc_entry_sz = byte_field & 0x1f;
2143 MLX4_GET(param->log_mc_hash_sz, outbox, 2157 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
2144 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2158 param->log_mc_hash_sz = byte_field & 0x1f;
2145 MLX4_GET(param->log_mc_table_sz, outbox, 2159 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
2146 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2160 param->log_mc_table_sz = byte_field & 0x1f;
2147 } 2161 }
2148 2162
2149 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 2163 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2167,15 +2181,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2167 /* TPT attributes */ 2181 /* TPT attributes */
2168 2182
2169 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); 2183 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
2170 MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); 2184 MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
2171 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); 2185 param->mw_enabled = byte_field >> 7;
2186 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
2187 param->log_mpt_sz = byte_field & 0x3f;
2172 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); 2188 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
2173 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); 2189 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
2174 2190
2175 /* UAR attributes */ 2191 /* UAR attributes */
2176 2192
2177 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2193 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
2178 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); 2194 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
2195 param->log_uar_sz = byte_field & 0xf;
2179 2196
2180 /* phv_check enable */ 2197 /* phv_check enable */
2181 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); 2198 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 4b4351141b94..d89a3da89e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
57 int i; 57 int i;
58 58
59 if (chunk->nsg > 0) 59 if (chunk->nsg > 0)
60 pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, 60 dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
61 PCI_DMA_BIDIRECTIONAL); 61 DMA_BIDIRECTIONAL);
62 62
63 for (i = 0; i < chunk->npages; ++i) 63 for (i = 0; i < chunk->npages; ++i)
64 __free_pages(sg_page(&chunk->mem[i]), 64 __free_pages(sg_page(&chunk->sg[i]),
65 get_order(chunk->mem[i].length)); 65 get_order(chunk->sg[i].length));
66} 66}
67 67
68static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) 68static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
71 71
72 for (i = 0; i < chunk->npages; ++i) 72 for (i = 0; i < chunk->npages; ++i)
73 dma_free_coherent(&dev->persist->pdev->dev, 73 dma_free_coherent(&dev->persist->pdev->dev,
74 chunk->mem[i].length, 74 chunk->buf[i].size,
75 lowmem_page_address(sg_page(&chunk->mem[i])), 75 chunk->buf[i].addr,
76 sg_dma_address(&chunk->mem[i])); 76 chunk->buf[i].dma_addr);
77} 77}
78 78
79void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) 79void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
111 return 0; 111 return 0;
112} 112}
113 113
114static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, 114static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
115 int order, gfp_t gfp_mask) 115 int order, gfp_t gfp_mask)
116{ 116{
117 void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, 117 buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
118 &sg_dma_address(mem), gfp_mask); 118 &buf->dma_addr, gfp_mask);
119 if (!buf) 119 if (!buf->addr)
120 return -ENOMEM; 120 return -ENOMEM;
121 121
122 if (offset_in_page(buf)) { 122 if (offset_in_page(buf->addr)) {
123 dma_free_coherent(dev, PAGE_SIZE << order, 123 dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
124 buf, sg_dma_address(mem)); 124 buf->dma_addr);
125 return -ENOMEM; 125 return -ENOMEM;
126 } 126 }
127 127
128 sg_set_buf(mem, buf, PAGE_SIZE << order); 128 buf->size = PAGE_SIZE << order;
129 sg_dma_len(mem) = PAGE_SIZE << order;
130 return 0; 129 return 0;
131} 130}
132 131
@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
159 158
160 while (npages > 0) { 159 while (npages > 0) {
161 if (!chunk) { 160 if (!chunk) {
162 chunk = kmalloc_node(sizeof(*chunk), 161 chunk = kzalloc_node(sizeof(*chunk),
163 gfp_mask & ~(__GFP_HIGHMEM | 162 gfp_mask & ~(__GFP_HIGHMEM |
164 __GFP_NOWARN), 163 __GFP_NOWARN),
165 dev->numa_node); 164 dev->numa_node);
166 if (!chunk) { 165 if (!chunk) {
167 chunk = kmalloc(sizeof(*chunk), 166 chunk = kzalloc(sizeof(*chunk),
168 gfp_mask & ~(__GFP_HIGHMEM | 167 gfp_mask & ~(__GFP_HIGHMEM |
169 __GFP_NOWARN)); 168 __GFP_NOWARN));
170 if (!chunk) 169 if (!chunk)
171 goto fail; 170 goto fail;
172 } 171 }
172 chunk->coherent = coherent;
173 173
174 sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); 174 if (!coherent)
175 chunk->npages = 0; 175 sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
176 chunk->nsg = 0;
177 list_add_tail(&chunk->list, &icm->chunk_list); 176 list_add_tail(&chunk->list, &icm->chunk_list);
178 } 177 }
179 178
@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
186 185
187 if (coherent) 186 if (coherent)
188 ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, 187 ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
189 &chunk->mem[chunk->npages], 188 &chunk->buf[chunk->npages],
190 cur_order, mask); 189 cur_order, mask);
191 else 190 else
192 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], 191 ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
193 cur_order, mask, 192 cur_order, mask,
194 dev->numa_node); 193 dev->numa_node);
195 194
@@ -205,9 +204,9 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
205 if (coherent) 204 if (coherent)
206 ++chunk->nsg; 205 ++chunk->nsg;
207 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { 206 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
208 chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, 207 chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
209 chunk->npages, 208 chunk->sg, chunk->npages,
210 PCI_DMA_BIDIRECTIONAL); 209 DMA_BIDIRECTIONAL);
211 210
212 if (chunk->nsg <= 0) 211 if (chunk->nsg <= 0)
213 goto fail; 212 goto fail;
@@ -220,9 +219,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
220 } 219 }
221 220
222 if (!coherent && chunk) { 221 if (!coherent && chunk) {
223 chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, 222 chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
224 chunk->npages, 223 chunk->npages, DMA_BIDIRECTIONAL);
225 PCI_DMA_BIDIRECTIONAL);
226 224
227 if (chunk->nsg <= 0) 225 if (chunk->nsg <= 0)
228 goto fail; 226 goto fail;
@@ -320,7 +318,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
320 u64 idx; 318 u64 idx;
321 struct mlx4_icm_chunk *chunk; 319 struct mlx4_icm_chunk *chunk;
322 struct mlx4_icm *icm; 320 struct mlx4_icm *icm;
323 struct page *page = NULL; 321 void *addr = NULL;
324 322
325 if (!table->lowmem) 323 if (!table->lowmem)
326 return NULL; 324 return NULL;
@@ -336,28 +334,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
336 334
337 list_for_each_entry(chunk, &icm->chunk_list, list) { 335 list_for_each_entry(chunk, &icm->chunk_list, list) {
338 for (i = 0; i < chunk->npages; ++i) { 336 for (i = 0; i < chunk->npages; ++i) {
337 dma_addr_t dma_addr;
338 size_t len;
339
340 if (table->coherent) {
341 len = chunk->buf[i].size;
342 dma_addr = chunk->buf[i].dma_addr;
343 addr = chunk->buf[i].addr;
344 } else {
345 struct page *page;
346
347 len = sg_dma_len(&chunk->sg[i]);
348 dma_addr = sg_dma_address(&chunk->sg[i]);
349
350 /* XXX: we should never do this for highmem
351 * allocation. This function either needs
352 * to be split, or the kernel virtual address
353 * return needs to be made optional.
354 */
355 page = sg_page(&chunk->sg[i]);
356 addr = lowmem_page_address(page);
357 }
358
339 if (dma_handle && dma_offset >= 0) { 359 if (dma_handle && dma_offset >= 0) {
340 if (sg_dma_len(&chunk->mem[i]) > dma_offset) 360 if (len > dma_offset)
341 *dma_handle = sg_dma_address(&chunk->mem[i]) + 361 *dma_handle = dma_addr + dma_offset;
342 dma_offset; 362 dma_offset -= len;
343 dma_offset -= sg_dma_len(&chunk->mem[i]);
344 } 363 }
364
345 /* 365 /*
346 * DMA mapping can merge pages but not split them, 366 * DMA mapping can merge pages but not split them,
347 * so if we found the page, dma_handle has already 367 * so if we found the page, dma_handle has already
348 * been assigned to. 368 * been assigned to.
349 */ 369 */
350 if (chunk->mem[i].length > offset) { 370 if (len > offset)
351 page = sg_page(&chunk->mem[i]);
352 goto out; 371 goto out;
353 } 372 offset -= len;
354 offset -= chunk->mem[i].length;
355 } 373 }
356 } 374 }
357 375
376 addr = NULL;
358out: 377out:
359 mutex_unlock(&table->mutex); 378 mutex_unlock(&table->mutex);
360 return page ? lowmem_page_address(page) + offset : NULL; 379 return addr ? addr + offset : NULL;
361} 380}
362 381
363int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, 382int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index c9169a490557..d199874b1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -47,11 +47,21 @@ enum {
47 MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, 47 MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
48}; 48};
49 49
50struct mlx4_icm_buf {
51 void *addr;
52 size_t size;
53 dma_addr_t dma_addr;
54};
55
50struct mlx4_icm_chunk { 56struct mlx4_icm_chunk {
51 struct list_head list; 57 struct list_head list;
52 int npages; 58 int npages;
53 int nsg; 59 int nsg;
54 struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; 60 bool coherent;
61 union {
62 struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
63 struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
64 };
55}; 65};
56 66
57struct mlx4_icm { 67struct mlx4_icm {
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
114 124
115static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) 125static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
116{ 126{
117 return sg_dma_address(&iter->chunk->mem[iter->page_idx]); 127 if (iter->chunk->coherent)
128 return iter->chunk->buf[iter->page_idx].dma_addr;
129 else
130 return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
118} 131}
119 132
120static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) 133static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
121{ 134{
122 return sg_dma_len(&iter->chunk->mem[iter->page_idx]); 135 if (iter->chunk->coherent)
136 return iter->chunk->buf[iter->page_idx].size;
137 else
138 return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
123} 139}
124 140
125int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); 141int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 456f30007ad6..421b9c3c8bf7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -63,8 +63,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
63 mutex_lock(&priv->alloc_mutex); 63 mutex_lock(&priv->alloc_mutex);
64 original_node = dev_to_node(&dev->pdev->dev); 64 original_node = dev_to_node(&dev->pdev->dev);
65 set_dev_node(&dev->pdev->dev, node); 65 set_dev_node(&dev->pdev->dev, node);
66 cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, 66 cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
67 dma_handle, GFP_KERNEL); 67 GFP_KERNEL);
68 set_dev_node(&dev->pdev->dev, original_node); 68 set_dev_node(&dev->pdev->dev, original_node);
69 mutex_unlock(&priv->alloc_mutex); 69 mutex_unlock(&priv->alloc_mutex);
70 return cpu_handle; 70 return cpu_handle;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d3125cdf69db..e267ff93e8a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1583,6 +1583,24 @@ no_trig:
1583 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); 1583 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1584} 1584}
1585 1585
1586void mlx5_cmd_flush(struct mlx5_core_dev *dev)
1587{
1588 struct mlx5_cmd *cmd = &dev->cmd;
1589 int i;
1590
1591 for (i = 0; i < cmd->max_reg_cmds; i++)
1592 while (down_trylock(&cmd->sem))
1593 mlx5_cmd_trigger_completions(dev);
1594
1595 while (down_trylock(&cmd->pages_sem))
1596 mlx5_cmd_trigger_completions(dev);
1597
1598 /* Unlock cmdif */
1599 up(&cmd->pages_sem);
1600 for (i = 0; i < cmd->max_reg_cmds; i++)
1601 up(&cmd->sem);
1602}
1603
1586static int status_to_err(u8 status) 1604static int status_to_err(u8 status)
1587{ 1605{
1588 return status ? -1 : 0; /* TBD more meaningful codes */ 1606 return status ? -1 : 0; /* TBD more meaningful codes */
@@ -1789,8 +1807,8 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1789{ 1807{
1790 struct device *ddev = &dev->pdev->dev; 1808 struct device *ddev = &dev->pdev->dev;
1791 1809
1792 cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, 1810 cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
1793 &cmd->alloc_dma, GFP_KERNEL); 1811 &cmd->alloc_dma, GFP_KERNEL);
1794 if (!cmd->cmd_alloc_buf) 1812 if (!cmd->cmd_alloc_buf)
1795 return -ENOMEM; 1813 return -ENOMEM;
1796 1814
@@ -1804,9 +1822,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1804 1822
1805 dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, 1823 dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
1806 cmd->alloc_dma); 1824 cmd->alloc_dma);
1807 cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, 1825 cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
1808 2 * MLX5_ADAPTER_PAGE_SIZE - 1, 1826 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
1809 &cmd->alloc_dma, GFP_KERNEL); 1827 &cmd->alloc_dma, GFP_KERNEL);
1810 if (!cmd->cmd_alloc_buf) 1828 if (!cmd->cmd_alloc_buf)
1811 return -ENOMEM; 1829 return -ENOMEM;
1812 1830
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8fa8fdd30b85..448a92561567 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -657,6 +657,7 @@ struct mlx5e_channel_stats {
657enum { 657enum {
658 MLX5E_STATE_OPENED, 658 MLX5E_STATE_OPENED,
659 MLX5E_STATE_DESTROYING, 659 MLX5E_STATE_DESTROYING,
660 MLX5E_STATE_XDP_TX_ENABLED,
660}; 661};
661 662
662struct mlx5e_rqt { 663struct mlx5e_rqt {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 046948ead152..f3c7ab6faea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
256 e->m_neigh.family = n->ops->family; 256 e->m_neigh.family = n->ops->family;
257 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 257 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
258 e->out_dev = out_dev; 258 e->out_dev = out_dev;
259 e->route_dev = route_dev;
259 260
260 /* It's important to add the neigh to the hash table before checking 261 /* It's important to add the neigh to the hash table before checking
261 * the neigh validity state. So if we'll get a notification, in case the 262 * the neigh validity state. So if we'll get a notification, in case the
@@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
369 e->m_neigh.family = n->ops->family; 370 e->m_neigh.family = n->ops->family;
370 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 371 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
371 e->out_dev = out_dev; 372 e->out_dev = out_dev;
373 e->route_dev = route_dev;
372 374
373 /* It's importent to add the neigh to the hash table before checking 375 /* It's importent to add the neigh to the hash table before checking
374 * the neigh validity state. So if we'll get a notification, in case the 376 * the neigh validity state. So if we'll get a notification, in case the
@@ -612,16 +614,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
612 struct mlx5_flow_spec *spec, 614 struct mlx5_flow_spec *spec,
613 struct tc_cls_flower_offload *f, 615 struct tc_cls_flower_offload *f,
614 void *headers_c, 616 void *headers_c,
615 void *headers_v) 617 void *headers_v, u8 *match_level)
616{ 618{
617 int tunnel_type; 619 int tunnel_type;
618 int err = 0; 620 int err = 0;
619 621
620 tunnel_type = mlx5e_tc_tun_get_type(filter_dev); 622 tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
621 if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 623 if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
624 *match_level = MLX5_MATCH_L4;
622 err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, 625 err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
623 headers_c, headers_v); 626 headers_c, headers_v);
624 } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { 627 } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
628 *match_level = MLX5_MATCH_L3;
625 err = mlx5e_tc_tun_parse_gretap(priv, spec, f, 629 err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
626 headers_c, headers_v); 630 headers_c, headers_v);
627 } else { 631 } else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 706ce7bf15e7..b63f15de899d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
39 struct mlx5_flow_spec *spec, 39 struct mlx5_flow_spec *spec,
40 struct tc_cls_flower_offload *f, 40 struct tc_cls_flower_offload *f,
41 void *headers_c, 41 void *headers_c,
42 void *headers_v); 42 void *headers_v, u8 *match_level);
43 43
44#endif //__MLX5_EN_TC_TUNNEL_H__ 44#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 3740177eed09..03b2a9f9c589 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
365 int sq_num; 365 int sq_num;
366 int i; 366 int i;
367 367
368 if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) 368 /* this flag is sufficient, no need to test internal sq state */
369 if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
369 return -ENETDOWN; 370 return -ENETDOWN;
370 371
371 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 372 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
378 379
379 sq = &priv->channels.c[sq_num]->xdpsq; 380 sq = &priv->channels.c[sq_num]->xdpsq;
380 381
381 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
382 return -ENETDOWN;
383
384 for (i = 0; i < n; i++) { 382 for (i = 0; i < n; i++) {
385 struct xdp_frame *xdpf = frames[i]; 383 struct xdp_frame *xdpf = frames[i];
386 struct mlx5e_xdp_info xdpi; 384 struct mlx5e_xdp_info xdpi;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 3a67cb3cd179..ee27a7c8cd87 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
50int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, 50int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
51 u32 flags); 51 u32 flags);
52 52
53static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
54{
55 set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
56}
57
58static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
59{
60 clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
61 /* let other device's napi(s) see our new state */
62 synchronize_rcu();
63}
64
65static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
66{
67 return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
68}
69
53static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) 70static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
54{ 71{
55 if (sq->doorbell_cseg) { 72 if (sq->doorbell_cseg) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c9df08133718..47233b9a4f81 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -354,9 +354,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
354 354
355 new_channels.params = priv->channels.params; 355 new_channels.params = priv->channels.params;
356 new_channels.params.num_channels = count; 356 new_channels.params.num_channels = count;
357 if (!netif_is_rxfh_configured(priv->netdev))
358 mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
359 MLX5E_INDIR_RQT_SIZE, count);
360 357
361 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 358 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
362 priv->channels.params = new_channels.params; 359 priv->channels.params = new_channels.params;
@@ -372,6 +369,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
372 if (arfs_enabled) 369 if (arfs_enabled)
373 mlx5e_arfs_disable(priv); 370 mlx5e_arfs_disable(priv);
374 371
372 if (!netif_is_rxfh_configured(priv->netdev))
373 mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
374 MLX5E_INDIR_RQT_SIZE, count);
375
375 /* Switch to new channels, set new parameters and close old ones */ 376 /* Switch to new channels, set new parameters and close old ones */
376 mlx5e_switch_priv_channels(priv, &new_channels, NULL); 377 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
377 378
@@ -844,9 +845,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
844 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, 845 ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
845 Autoneg); 846 Autoneg);
846 847
847 if (get_fec_supported_advertised(mdev, link_ksettings)) 848 err = get_fec_supported_advertised(mdev, link_ksettings);
849 if (err) {
848 netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n", 850 netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n",
849 __func__, err); 851 __func__, err);
852 err = 0; /* don't fail caps query because of FEC error */
853 }
850 854
851 if (!an_disable_admin) 855 if (!an_disable_admin)
852 ethtool_link_ksettings_add_link_mode(link_ksettings, 856 ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8cfd2ec7c0a2..93e50ccd44c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -950,7 +950,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
950 if (params->rx_dim_enabled) 950 if (params->rx_dim_enabled)
951 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); 951 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
952 952
953 if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) 953 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
954 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); 954 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
955 955
956 return 0; 956 return 0;
@@ -2938,6 +2938,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2938 2938
2939 mlx5e_build_tx2sq_maps(priv); 2939 mlx5e_build_tx2sq_maps(priv);
2940 mlx5e_activate_channels(&priv->channels); 2940 mlx5e_activate_channels(&priv->channels);
2941 mlx5e_xdp_tx_enable(priv);
2941 netif_tx_start_all_queues(priv->netdev); 2942 netif_tx_start_all_queues(priv->netdev);
2942 2943
2943 if (mlx5e_is_vport_rep(priv)) 2944 if (mlx5e_is_vport_rep(priv))
@@ -2959,6 +2960,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2959 */ 2960 */
2960 netif_tx_stop_all_queues(priv->netdev); 2961 netif_tx_stop_all_queues(priv->netdev);
2961 netif_tx_disable(priv->netdev); 2962 netif_tx_disable(priv->netdev);
2963 mlx5e_xdp_tx_disable(priv);
2962 mlx5e_deactivate_channels(&priv->channels); 2964 mlx5e_deactivate_channels(&priv->channels);
2963} 2965}
2964 2966
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 96cc0c6a4014..ef9e472daffb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -58,7 +58,8 @@ struct mlx5e_rep_indr_block_priv {
58 struct list_head list; 58 struct list_head list;
59}; 59};
60 60
61static void mlx5e_rep_indr_unregister_block(struct net_device *netdev); 61static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
62 struct net_device *netdev);
62 63
63static void mlx5e_rep_get_drvinfo(struct net_device *dev, 64static void mlx5e_rep_get_drvinfo(struct net_device *dev,
64 struct ethtool_drvinfo *drvinfo) 65 struct ethtool_drvinfo *drvinfo)
@@ -179,6 +180,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
179 180
180 s->tx_packets += sq_stats->packets; 181 s->tx_packets += sq_stats->packets;
181 s->tx_bytes += sq_stats->bytes; 182 s->tx_bytes += sq_stats->bytes;
183 s->tx_queue_dropped += sq_stats->dropped;
182 } 184 }
183 } 185 }
184} 186}
@@ -594,6 +596,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
594 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { 596 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
595 ether_addr_copy(e->h_dest, ha); 597 ether_addr_copy(e->h_dest, ha);
596 ether_addr_copy(eth->h_dest, ha); 598 ether_addr_copy(eth->h_dest, ha);
599 /* Update the encap source mac, in case that we delete
600 * the flows when encap source mac changed.
601 */
602 ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
597 603
598 mlx5e_tc_encap_flows_add(priv, e); 604 mlx5e_tc_encap_flows_add(priv, e);
599 } 605 }
@@ -663,7 +669,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
663 struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; 669 struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
664 670
665 list_for_each_entry_safe(cb_priv, temp, head, list) { 671 list_for_each_entry_safe(cb_priv, temp, head, list) {
666 mlx5e_rep_indr_unregister_block(cb_priv->netdev); 672 mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
667 kfree(cb_priv); 673 kfree(cb_priv);
668 } 674 }
669} 675}
@@ -735,7 +741,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
735 741
736 err = tcf_block_cb_register(f->block, 742 err = tcf_block_cb_register(f->block,
737 mlx5e_rep_indr_setup_block_cb, 743 mlx5e_rep_indr_setup_block_cb,
738 netdev, indr_priv, f->extack); 744 indr_priv, indr_priv, f->extack);
739 if (err) { 745 if (err) {
740 list_del(&indr_priv->list); 746 list_del(&indr_priv->list);
741 kfree(indr_priv); 747 kfree(indr_priv);
@@ -743,14 +749,15 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
743 749
744 return err; 750 return err;
745 case TC_BLOCK_UNBIND: 751 case TC_BLOCK_UNBIND:
752 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
753 if (!indr_priv)
754 return -ENOENT;
755
746 tcf_block_cb_unregister(f->block, 756 tcf_block_cb_unregister(f->block,
747 mlx5e_rep_indr_setup_block_cb, 757 mlx5e_rep_indr_setup_block_cb,
748 netdev); 758 indr_priv);
749 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); 759 list_del(&indr_priv->list);
750 if (indr_priv) { 760 kfree(indr_priv);
751 list_del(&indr_priv->list);
752 kfree(indr_priv);
753 }
754 761
755 return 0; 762 return 0;
756 default: 763 default:
@@ -779,7 +786,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
779 786
780 err = __tc_indr_block_cb_register(netdev, rpriv, 787 err = __tc_indr_block_cb_register(netdev, rpriv,
781 mlx5e_rep_indr_setup_tc_cb, 788 mlx5e_rep_indr_setup_tc_cb,
782 netdev); 789 rpriv);
783 if (err) { 790 if (err) {
784 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); 791 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
785 792
@@ -789,10 +796,11 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
789 return err; 796 return err;
790} 797}
791 798
792static void mlx5e_rep_indr_unregister_block(struct net_device *netdev) 799static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
800 struct net_device *netdev)
793{ 801{
794 __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, 802 __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
795 netdev); 803 rpriv);
796} 804}
797 805
798static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, 806static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
@@ -811,7 +819,7 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
811 mlx5e_rep_indr_register_block(rpriv, netdev); 819 mlx5e_rep_indr_register_block(rpriv, netdev);
812 break; 820 break;
813 case NETDEV_UNREGISTER: 821 case NETDEV_UNREGISTER:
814 mlx5e_rep_indr_unregister_block(netdev); 822 mlx5e_rep_indr_unregister_block(rpriv, netdev);
815 break; 823 break;
816 } 824 }
817 return NOTIFY_OK; 825 return NOTIFY_OK;
@@ -1122,9 +1130,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
1122 struct mlx5e_priv *priv = netdev_priv(dev); 1130 struct mlx5e_priv *priv = netdev_priv(dev);
1123 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1131 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1124 struct mlx5_eswitch_rep *rep = rpriv->rep; 1132 struct mlx5_eswitch_rep *rep = rpriv->rep;
1125 int ret; 1133 int ret, pf_num;
1134
1135 ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
1136 if (ret)
1137 return ret;
1138
1139 if (rep->vport == FDB_UPLINK_VPORT)
1140 ret = snprintf(buf, len, "p%d", pf_num);
1141 else
1142 ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
1126 1143
1127 ret = snprintf(buf, len, "%d", rep->vport - 1);
1128 if (ret >= len) 1144 if (ret >= len)
1129 return -EOPNOTSUPP; 1145 return -EOPNOTSUPP;
1130 1146
@@ -1281,6 +1297,18 @@ static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
1281 return 0; 1297 return 0;
1282} 1298}
1283 1299
1300static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
1301 __be16 vlan_proto)
1302{
1303 netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
1304
1305 if (vlan != 0)
1306 return -EOPNOTSUPP;
1307
1308 /* allow setting 0-vid for compatibility with libvirt */
1309 return 0;
1310}
1311
1284static const struct switchdev_ops mlx5e_rep_switchdev_ops = { 1312static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
1285 .switchdev_port_attr_get = mlx5e_attr_get, 1313 .switchdev_port_attr_get = mlx5e_attr_get,
1286}; 1314};
@@ -1315,6 +1343,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
1315 .ndo_set_vf_rate = mlx5e_set_vf_rate, 1343 .ndo_set_vf_rate = mlx5e_set_vf_rate,
1316 .ndo_get_vf_config = mlx5e_get_vf_config, 1344 .ndo_get_vf_config = mlx5e_get_vf_config,
1317 .ndo_get_vf_stats = mlx5e_get_vf_stats, 1345 .ndo_get_vf_stats = mlx5e_get_vf_stats,
1346 .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
1318}; 1347};
1319 1348
1320bool mlx5e_eswitch_rep(struct net_device *netdev) 1349bool mlx5e_eswitch_rep(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index edd722824697..36eafc877e6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -148,6 +148,7 @@ struct mlx5e_encap_entry {
148 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 148 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
149 149
150 struct net_device *out_dev; 150 struct net_device *out_dev;
151 struct net_device *route_dev;
151 int tunnel_type; 152 int tunnel_type;
152 int tunnel_hlen; 153 int tunnel_hlen;
153 int reformat_type; 154 int reformat_type;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1d0bb5ff8c26..f86e4804e83e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -732,6 +732,8 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
732 ((struct ipv6hdr *)ip_p)->nexthdr; 732 ((struct ipv6hdr *)ip_p)->nexthdr;
733} 733}
734 734
735#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
736
735static inline void mlx5e_handle_csum(struct net_device *netdev, 737static inline void mlx5e_handle_csum(struct net_device *netdev,
736 struct mlx5_cqe64 *cqe, 738 struct mlx5_cqe64 *cqe,
737 struct mlx5e_rq *rq, 739 struct mlx5e_rq *rq,
@@ -754,6 +756,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
754 if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) 756 if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
755 goto csum_unnecessary; 757 goto csum_unnecessary;
756 758
759 /* CQE csum doesn't cover padding octets in short ethernet
760 * frames. And the pad field is appended prior to calculating
761 * and appending the FCS field.
762 *
763 * Detecting these padded frames requires to verify and parse
764 * IP headers, so we simply force all those small frames to be
765 * CHECKSUM_UNNECESSARY even if they are not padded.
766 */
767 if (short_frame(skb->len))
768 goto csum_unnecessary;
769
757 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { 770 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
758 if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) 771 if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
759 goto csum_unnecessary; 772 goto csum_unnecessary;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index cae6c6d48984..b5c1b039375a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -128,6 +128,7 @@ struct mlx5e_tc_flow_parse_attr {
128 struct net_device *filter_dev; 128 struct net_device *filter_dev;
129 struct mlx5_flow_spec spec; 129 struct mlx5_flow_spec spec;
130 int num_mod_hdr_actions; 130 int num_mod_hdr_actions;
131 int max_mod_hdr_actions;
131 void *mod_hdr_actions; 132 void *mod_hdr_actions;
132 int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; 133 int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
133}; 134};
@@ -1302,7 +1303,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1302static int parse_tunnel_attr(struct mlx5e_priv *priv, 1303static int parse_tunnel_attr(struct mlx5e_priv *priv,
1303 struct mlx5_flow_spec *spec, 1304 struct mlx5_flow_spec *spec,
1304 struct tc_cls_flower_offload *f, 1305 struct tc_cls_flower_offload *f,
1305 struct net_device *filter_dev) 1306 struct net_device *filter_dev, u8 *match_level)
1306{ 1307{
1307 struct netlink_ext_ack *extack = f->common.extack; 1308 struct netlink_ext_ack *extack = f->common.extack;
1308 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1309 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1317,7 +1318,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
1317 int err = 0; 1318 int err = 0;
1318 1319
1319 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, 1320 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
1320 headers_c, headers_v); 1321 headers_c, headers_v, match_level);
1321 if (err) { 1322 if (err) {
1322 NL_SET_ERR_MSG_MOD(extack, 1323 NL_SET_ERR_MSG_MOD(extack,
1323 "failed to parse tunnel attributes"); 1324 "failed to parse tunnel attributes");
@@ -1426,7 +1427,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1426 struct mlx5_flow_spec *spec, 1427 struct mlx5_flow_spec *spec,
1427 struct tc_cls_flower_offload *f, 1428 struct tc_cls_flower_offload *f,
1428 struct net_device *filter_dev, 1429 struct net_device *filter_dev,
1429 u8 *match_level) 1430 u8 *match_level, u8 *tunnel_match_level)
1430{ 1431{
1431 struct netlink_ext_ack *extack = f->common.extack; 1432 struct netlink_ext_ack *extack = f->common.extack;
1432 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1433 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1477,7 +1478,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1477 switch (key->addr_type) { 1478 switch (key->addr_type) {
1478 case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 1479 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1479 case FLOW_DISSECTOR_KEY_IPV6_ADDRS: 1480 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1480 if (parse_tunnel_attr(priv, spec, f, filter_dev)) 1481 if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
1481 return -EOPNOTSUPP; 1482 return -EOPNOTSUPP;
1482 break; 1483 break;
1483 default: 1484 default:
@@ -1826,11 +1827,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
1826 struct mlx5_core_dev *dev = priv->mdev; 1827 struct mlx5_core_dev *dev = priv->mdev;
1827 struct mlx5_eswitch *esw = dev->priv.eswitch; 1828 struct mlx5_eswitch *esw = dev->priv.eswitch;
1828 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1829 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1830 u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
1829 struct mlx5_eswitch_rep *rep; 1831 struct mlx5_eswitch_rep *rep;
1830 u8 match_level;
1831 int err; 1832 int err;
1832 1833
1833 err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); 1834 err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
1834 1835
1835 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { 1836 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1836 rep = rpriv->rep; 1837 rep = rpriv->rep;
@@ -1846,10 +1847,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
1846 } 1847 }
1847 } 1848 }
1848 1849
1849 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) 1850 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
1850 flow->esw_attr->match_level = match_level; 1851 flow->esw_attr->match_level = match_level;
1851 else 1852 flow->esw_attr->tunnel_match_level = tunnel_match_level;
1853 } else {
1852 flow->nic_attr->match_level = match_level; 1854 flow->nic_attr->match_level = match_level;
1855 }
1853 1856
1854 return err; 1857 return err;
1855} 1858}
@@ -1934,9 +1937,9 @@ static struct mlx5_fields fields[] = {
1934 OFFLOAD(UDP_DPORT, 2, udp.dest, 0), 1937 OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
1935}; 1938};
1936 1939
1937/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at 1940/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
1938 * max from the SW pedit action. On success, it says how many HW actions were 1941 * max from the SW pedit action. On success, attr->num_mod_hdr_actions
1939 * actually parsed. 1942 * says how many HW actions were actually parsed.
1940 */ 1943 */
1941static int offload_pedit_fields(struct pedit_headers *masks, 1944static int offload_pedit_fields(struct pedit_headers *masks,
1942 struct pedit_headers *vals, 1945 struct pedit_headers *vals,
@@ -1960,9 +1963,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
1960 add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; 1963 add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1961 1964
1962 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); 1965 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1963 action = parse_attr->mod_hdr_actions; 1966 action = parse_attr->mod_hdr_actions +
1964 max_actions = parse_attr->num_mod_hdr_actions; 1967 parse_attr->num_mod_hdr_actions * action_size;
1965 nactions = 0; 1968
1969 max_actions = parse_attr->max_mod_hdr_actions;
1970 nactions = parse_attr->num_mod_hdr_actions;
1966 1971
1967 for (i = 0; i < ARRAY_SIZE(fields); i++) { 1972 for (i = 0; i < ARRAY_SIZE(fields); i++) {
1968 f = &fields[i]; 1973 f = &fields[i];
@@ -2073,7 +2078,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
2073 if (!parse_attr->mod_hdr_actions) 2078 if (!parse_attr->mod_hdr_actions)
2074 return -ENOMEM; 2079 return -ENOMEM;
2075 2080
2076 parse_attr->num_mod_hdr_actions = max_actions; 2081 parse_attr->max_mod_hdr_actions = max_actions;
2077 return 0; 2082 return 0;
2078} 2083}
2079 2084
@@ -2119,9 +2124,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2119 goto out_err; 2124 goto out_err;
2120 } 2125 }
2121 2126
2122 err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); 2127 if (!parse_attr->mod_hdr_actions) {
2123 if (err) 2128 err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
2124 goto out_err; 2129 if (err)
2130 goto out_err;
2131 }
2125 2132
2126 err = offload_pedit_fields(masks, vals, parse_attr, extack); 2133 err = offload_pedit_fields(masks, vals, parse_attr, extack);
2127 if (err < 0) 2134 if (err < 0)
@@ -2179,6 +2186,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
2179 2186
2180static bool modify_header_match_supported(struct mlx5_flow_spec *spec, 2187static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2181 struct tcf_exts *exts, 2188 struct tcf_exts *exts,
2189 u32 actions,
2182 struct netlink_ext_ack *extack) 2190 struct netlink_ext_ack *extack)
2183{ 2191{
2184 const struct tc_action *a; 2192 const struct tc_action *a;
@@ -2188,7 +2196,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2188 u16 ethertype; 2196 u16 ethertype;
2189 int nkeys, i; 2197 int nkeys, i;
2190 2198
2191 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); 2199 if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2200 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
2201 else
2202 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2203
2192 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); 2204 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2193 2205
2194 /* for non-IP we only re-write MACs, so we're okay */ 2206 /* for non-IP we only re-write MACs, so we're okay */
@@ -2245,7 +2257,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
2245 2257
2246 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 2258 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2247 return modify_header_match_supported(&parse_attr->spec, exts, 2259 return modify_header_match_supported(&parse_attr->spec, exts,
2248 extack); 2260 actions, extack);
2249 2261
2250 return true; 2262 return true;
2251} 2263}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 598ad7e4d5c9..0e55cd1f2e98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
387 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); 387 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
388 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); 388 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
389 if (unlikely(contig_wqebbs_room < num_wqebbs)) { 389 if (unlikely(contig_wqebbs_room < num_wqebbs)) {
390#ifdef CONFIG_MLX5_EN_IPSEC
391 struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
392#endif
390 mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); 393 mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
391 mlx5e_sq_fetch_wqe(sq, &wqe, &pi); 394 mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
395#ifdef CONFIG_MLX5_EN_IPSEC
396 wqe->eth = cur_eth;
397#endif
392 } 398 }
393 399
394 /* fill wqe */ 400 /* fill wqe */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index a44ea7b85614..5b492b67f4e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1134,13 +1134,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1134 int err = 0; 1134 int err = 0;
1135 u8 *smac_v; 1135 u8 *smac_v;
1136 1136
1137 if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
1138 mlx5_core_warn(esw->dev,
1139 "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
1140 vport->vport);
1141 return -EPERM;
1142 }
1143
1144 esw_vport_cleanup_ingress_rules(esw, vport); 1137 esw_vport_cleanup_ingress_rules(esw, vport);
1145 1138
1146 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { 1139 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1728,7 +1721,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1728 int vport_num; 1721 int vport_num;
1729 int err; 1722 int err;
1730 1723
1731 if (!MLX5_ESWITCH_MANAGER(dev)) 1724 if (!MLX5_VPORT_MANAGER(dev))
1732 return 0; 1725 return 0;
1733 1726
1734 esw_info(dev, 1727 esw_info(dev,
@@ -1797,7 +1790,7 @@ abort:
1797 1790
1798void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1791void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1799{ 1792{
1800 if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) 1793 if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
1801 return; 1794 return;
1802 1795
1803 esw_info(esw->dev, "cleanup\n"); 1796 esw_info(esw->dev, "cleanup\n");
@@ -1827,13 +1820,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1827 mutex_lock(&esw->state_lock); 1820 mutex_lock(&esw->state_lock);
1828 evport = &esw->vports[vport]; 1821 evport = &esw->vports[vport];
1829 1822
1830 if (evport->info.spoofchk && !is_valid_ether_addr(mac)) { 1823 if (evport->info.spoofchk && !is_valid_ether_addr(mac))
1831 mlx5_core_warn(esw->dev, 1824 mlx5_core_warn(esw->dev,
1832 "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", 1825 "Set invalid MAC while spoofchk is on, vport(%d)\n",
1833 vport); 1826 vport);
1834 err = -EPERM;
1835 goto unlock;
1836 }
1837 1827
1838 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); 1828 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1839 if (err) { 1829 if (err) {
@@ -1979,6 +1969,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
1979 evport = &esw->vports[vport]; 1969 evport = &esw->vports[vport];
1980 pschk = evport->info.spoofchk; 1970 pschk = evport->info.spoofchk;
1981 evport->info.spoofchk = spoofchk; 1971 evport->info.spoofchk = spoofchk;
1972 if (pschk && !is_valid_ether_addr(evport->info.mac))
1973 mlx5_core_warn(esw->dev,
1974 "Spoofchk in set while MAC is invalid, vport(%d)\n",
1975 evport->vport);
1982 if (evport->enabled && esw->mode == SRIOV_LEGACY) 1976 if (evport->enabled && esw->mode == SRIOV_LEGACY)
1983 err = esw_vport_ingress_config(esw, evport); 1977 err = esw_vport_ingress_config(esw, evport);
1984 if (err) 1978 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 9c89eea9b2c3..748ff178a1d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -312,6 +312,7 @@ struct mlx5_esw_flow_attr {
312 } dests[MLX5_MAX_FLOW_FWD_VPORTS]; 312 } dests[MLX5_MAX_FLOW_FWD_VPORTS];
313 u32 mod_hdr_id; 313 u32 mod_hdr_id;
314 u8 match_level; 314 u8 match_level;
315 u8 tunnel_match_level;
315 struct mlx5_fc *counter; 316 struct mlx5_fc *counter;
316 u32 chain; 317 u32 chain;
317 u16 prio; 318 u16 prio;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 53065b6ae593..d4e6fe5b9300 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -160,14 +160,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
160 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 160 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
161 source_eswitch_owner_vhca_id); 161 source_eswitch_owner_vhca_id);
162 162
163 if (attr->match_level == MLX5_MATCH_NONE) 163 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
164 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 164 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
165 else 165 if (attr->tunnel_match_level != MLX5_MATCH_NONE)
166 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | 166 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
167 MLX5_MATCH_MISC_PARAMETERS; 167 if (attr->match_level != MLX5_MATCH_NONE)
168 168 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
169 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 169 } else if (attr->match_level != MLX5_MATCH_NONE) {
170 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; 170 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
171 }
171 172
172 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 173 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
173 flow_act.modify_id = attr->mod_hdr_id; 174 flow_act.modify_id = attr->mod_hdr_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index fbc42b7252a9..503035469d2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -211,11 +211,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
211 enum port_module_event_status_type module_status; 211 enum port_module_event_status_type module_status;
212 enum port_module_event_error_type error_type; 212 enum port_module_event_error_type error_type;
213 struct mlx5_eqe_port_module *module_event_eqe; 213 struct mlx5_eqe_port_module *module_event_eqe;
214 const char *status_str, *error_str; 214 const char *status_str;
215 u8 module_num; 215 u8 module_num;
216 216
217 module_event_eqe = &eqe->data.port_module; 217 module_event_eqe = &eqe->data.port_module;
218 module_num = module_event_eqe->module;
219 module_status = module_event_eqe->module_status & 218 module_status = module_event_eqe->module_status &
220 PORT_MODULE_EVENT_MODULE_STATUS_MASK; 219 PORT_MODULE_EVENT_MODULE_STATUS_MASK;
221 error_type = module_event_eqe->error_type & 220 error_type = module_event_eqe->error_type &
@@ -223,25 +222,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
223 222
224 if (module_status < MLX5_MODULE_STATUS_NUM) 223 if (module_status < MLX5_MODULE_STATUS_NUM)
225 events->pme_stats.status_counters[module_status]++; 224 events->pme_stats.status_counters[module_status]++;
226 status_str = mlx5_pme_status_to_string(module_status);
227 225
228 if (module_status == MLX5_MODULE_STATUS_ERROR) { 226 if (module_status == MLX5_MODULE_STATUS_ERROR)
229 if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) 227 if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
230 events->pme_stats.error_counters[error_type]++; 228 events->pme_stats.error_counters[error_type]++;
231 error_str = mlx5_pme_error_to_string(error_type);
232 }
233 229
234 if (!printk_ratelimit()) 230 if (!printk_ratelimit())
235 return NOTIFY_OK; 231 return NOTIFY_OK;
236 232
237 if (module_status == MLX5_MODULE_STATUS_ERROR) 233 module_num = module_event_eqe->module;
234 status_str = mlx5_pme_status_to_string(module_status);
235 if (module_status == MLX5_MODULE_STATUS_ERROR) {
236 const char *error_str = mlx5_pme_error_to_string(error_type);
237
238 mlx5_core_err(events->dev, 238 mlx5_core_err(events->dev,
239 "Port module event[error]: module %u, %s, %s\n", 239 "Port module event[error]: module %u, %s, %s\n",
240 module_num, status_str, error_str); 240 module_num, status_str, error_str);
241 else 241 } else {
242 mlx5_core_info(events->dev, 242 mlx5_core_info(events->dev,
243 "Port module event: module %u, %s\n", 243 "Port module event: module %u, %s\n",
244 module_num, status_str); 244 module_num, status_str);
245 }
245 246
246 return NOTIFY_OK; 247 return NOTIFY_OK;
247} 248}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 196c07383082..cb9fa3430c53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
103 mlx5_core_err(dev, "start\n"); 103 mlx5_core_err(dev, "start\n");
104 if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { 104 if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
105 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 105 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
106 mlx5_cmd_trigger_completions(dev); 106 mlx5_cmd_flush(dev);
107 } 107 }
108 108
109 mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); 109 mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 3a6baed722d8..2d223385dc81 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -616,6 +616,27 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
616 } 616 }
617} 617}
618 618
619int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
620{
621 struct mlx5_lag *ldev;
622 int n;
623
624 ldev = mlx5_lag_dev_get(dev);
625 if (!ldev) {
626 mlx5_core_warn(dev, "no lag device, can't get pf num\n");
627 return -EINVAL;
628 }
629
630 for (n = 0; n < MLX5_MAX_PORTS; n++)
631 if (ldev->pf[n].dev == dev) {
632 *pf_num = n;
633 return 0;
634 }
635
636 mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
637 return -EINVAL;
638}
639
619/* Must be called with intf_mutex held */ 640/* Must be called with intf_mutex held */
620void mlx5_lag_remove(struct mlx5_core_dev *dev) 641void mlx5_lag_remove(struct mlx5_core_dev *dev)
621{ 642{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index c68dcea5985b..4fdac020b795 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -126,6 +126,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
126 struct ptp_system_timestamp *sts); 126 struct ptp_system_timestamp *sts);
127 127
128void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); 128void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
129void mlx5_cmd_flush(struct mlx5_core_dev *dev);
129int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); 130int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
130void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); 131void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
131 132
@@ -187,6 +188,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
187 MLX5_CAP_GEN(dev, lag_master); 188 MLX5_CAP_GEN(dev, lag_master);
188} 189}
189 190
191int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num);
192
190void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); 193void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
191void mlx5_lag_update(struct mlx5_core_dev *dev); 194void mlx5_lag_update(struct mlx5_core_dev *dev);
192 195
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 388f205a497f..370ca94b6775 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common *
44mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) 44mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
45{ 45{
46 struct mlx5_core_rsc_common *common; 46 struct mlx5_core_rsc_common *common;
47 unsigned long flags;
47 48
48 spin_lock(&table->lock); 49 spin_lock_irqsave(&table->lock, flags);
49 50
50 common = radix_tree_lookup(&table->tree, rsn); 51 common = radix_tree_lookup(&table->tree, rsn);
51 if (common) 52 if (common)
52 atomic_inc(&common->refcount); 53 atomic_inc(&common->refcount);
53 54
54 spin_unlock(&table->lock); 55 spin_unlock_irqrestore(&table->lock, flags);
55 56
56 return common; 57 return common;
57} 58}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 080ddd1942ec..b9a25aed5d11 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
78 depends on IPV6 || IPV6=n 78 depends on IPV6 || IPV6=n
79 depends on NET_IPGRE || NET_IPGRE=n 79 depends on NET_IPGRE || NET_IPGRE=n
80 depends on IPV6_GRE || IPV6_GRE=n 80 depends on IPV6_GRE || IPV6_GRE=n
81 depends on VXLAN || VXLAN=n
81 select GENERIC_ALLOCATOR 82 select GENERIC_ALLOCATOR
82 select PARMAN 83 select PARMAN
83 select OBJAGG 84 select OBJAGG
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 66b8098c6fd2..a2321fe8d6a0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
604 u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); 604 u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
605 u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); 605 u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
606 u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); 606 u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
607 char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
608
609 memcpy(ncqe, cqe, q->elem_size);
610 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
607 611
608 if (sendq) { 612 if (sendq) {
609 struct mlxsw_pci_queue *sdq; 613 struct mlxsw_pci_queue *sdq;
610 614
611 sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); 615 sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
612 mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, 616 mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
613 wqe_counter, cqe); 617 wqe_counter, ncqe);
614 q->u.cq.comp_sdq_count++; 618 q->u.cq.comp_sdq_count++;
615 } else { 619 } else {
616 struct mlxsw_pci_queue *rdq; 620 struct mlxsw_pci_queue *rdq;
617 621
618 rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); 622 rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
619 mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, 623 mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
620 wqe_counter, q->u.cq.v, cqe); 624 wqe_counter, q->u.cq.v, ncqe);
621 q->u.cq.comp_rdq_count++; 625 q->u.cq.comp_rdq_count++;
622 } 626 }
623 if (++items == credits) 627 if (++items == credits)
624 break; 628 break;
625 } 629 }
626 if (items) { 630 if (items)
627 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
628 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); 631 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
629 }
630} 632}
631 633
632static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) 634static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
@@ -1365,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1365 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); 1367 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
1366 1368
1367 if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) 1369 if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
1368 break; 1370 return 0;
1369 cond_resched(); 1371 cond_resched();
1370 } while (time_before(jiffies, end)); 1372 } while (time_before(jiffies, end));
1371 return 0; 1373 return -EBUSY;
1372} 1374}
1373 1375
1374static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) 1376static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index bb99f6d41fe0..ffee38e36ce8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
27 27
28#define MLXSW_PCI_SW_RESET 0xF0010 28#define MLXSW_PCI_SW_RESET 0xF0010
29#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) 29#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
30#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 30#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
31#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 31#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
32#define MLXSW_PCI_FW_READY 0xA1844 32#define MLXSW_PCI_FW_READY 0xA1844
33#define MLXSW_PCI_FW_READY_MASK 0xFFFF 33#define MLXSW_PCI_FW_READY_MASK 0xFFFF
@@ -53,6 +53,7 @@
53#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ 53#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
54#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ 54#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
55#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ 55#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
56#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
56#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ 57#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
57#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) 58#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
58#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) 59#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index eed1045e4d96..b65e274b02e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
862 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 862 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
863 bool configure = false; 863 bool configure = false;
864 bool pfc = false; 864 bool pfc = false;
865 u16 thres_cells;
866 u16 delay_cells;
865 bool lossy; 867 bool lossy;
866 u16 thres;
867 868
868 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 869 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
869 if (prio_tc[j] == i) { 870 if (prio_tc[j] == i) {
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
877 continue; 878 continue;
878 879
879 lossy = !(pfc || pause_en); 880 lossy = !(pfc || pause_en);
880 thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 881 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
881 delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, 882 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
882 pause_en); 883 pfc, pause_en);
883 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); 884 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
885 thres_cells, lossy);
884 } 886 }
885 887
886 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 888 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
@@ -5005,12 +5007,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
5005 lower_dev, 5007 lower_dev,
5006 upper_dev); 5008 upper_dev);
5007 } else if (netif_is_lag_master(upper_dev)) { 5009 } else if (netif_is_lag_master(upper_dev)) {
5008 if (info->linking) 5010 if (info->linking) {
5009 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 5011 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
5010 upper_dev); 5012 upper_dev);
5011 else 5013 } else {
5014 mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
5015 false);
5012 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 5016 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
5013 upper_dev); 5017 upper_dev);
5018 }
5014 } else if (netif_is_ovs_master(upper_dev)) { 5019 } else if (netif_is_ovs_master(upper_dev)) {
5015 if (info->linking) 5020 if (info->linking)
5016 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 5021 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index b0f2d8e8ded0..ac222833a5cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -72,7 +72,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
72 act_set = mlxsw_afa_block_first_set(rulei->act_block); 72 act_set = mlxsw_afa_block_first_set(rulei->act_block);
73 mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); 73 mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
74 74
75 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); 75 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
76 if (err)
77 goto err_ptce2_write;
78
79 return 0;
80
81err_ptce2_write:
82 cregion->ops->entry_remove(cregion, centry);
83 return err;
76} 84}
77 85
78static void 86static void
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 1c19feefa5f2..2941967e1cc5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -1022,7 +1022,6 @@ void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
1022{ 1022{
1023 struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; 1023 struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
1024 1024
1025 ASSERT_RTNL();
1026 objagg_obj_put(aregion->erp_table->objagg, objagg_obj); 1025 objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
1027} 1026}
1028 1027
@@ -1054,7 +1053,6 @@ void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
1054 const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); 1053 const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
1055 unsigned int erp_bank; 1054 unsigned int erp_bank;
1056 1055
1057 ASSERT_RTNL();
1058 if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) 1056 if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
1059 return; 1057 return;
1060 1058
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 055cc6943b34..9d9aa28684af 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -997,8 +997,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
997static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { 997static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
998 .type = MLXSW_SP_FID_TYPE_DUMMY, 998 .type = MLXSW_SP_FID_TYPE_DUMMY,
999 .fid_size = sizeof(struct mlxsw_sp_fid), 999 .fid_size = sizeof(struct mlxsw_sp_fid),
1000 .start_index = MLXSW_SP_RFID_BASE - 1, 1000 .start_index = VLAN_N_VID - 1,
1001 .end_index = MLXSW_SP_RFID_BASE - 1, 1001 .end_index = VLAN_N_VID - 1,
1002 .ops = &mlxsw_sp_fid_dummy_ops, 1002 .ops = &mlxsw_sp_fid_dummy_ops,
1003}; 1003};
1004 1004
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 0a31fff2516e..fb1c48c698f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -816,14 +816,14 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
816 ops = nve->nve_ops_arr[params->type]; 816 ops = nve->nve_ops_arr[params->type];
817 817
818 if (!ops->can_offload(nve, params->dev, extack)) 818 if (!ops->can_offload(nve, params->dev, extack))
819 return -EOPNOTSUPP; 819 return -EINVAL;
820 820
821 memset(&config, 0, sizeof(config)); 821 memset(&config, 0, sizeof(config));
822 ops->nve_config(nve, params->dev, &config); 822 ops->nve_config(nve, params->dev, &config);
823 if (nve->num_nve_tunnels && 823 if (nve->num_nve_tunnels &&
824 memcmp(&config, &nve->config, sizeof(config))) { 824 memcmp(&config, &nve->config, sizeof(config))) {
825 NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); 825 NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
826 return -EOPNOTSUPP; 826 return -EINVAL;
827 } 827 }
828 828
829 err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config); 829 err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 1bd2c6e15f8d..c772109b638d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1078,8 +1078,7 @@ static int
1078mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, 1078mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1079 struct mlxsw_sp_bridge_port *bridge_port, 1079 struct mlxsw_sp_bridge_port *bridge_port,
1080 u16 vid, bool is_untagged, bool is_pvid, 1080 u16 vid, bool is_untagged, bool is_pvid,
1081 struct netlink_ext_ack *extack, 1081 struct netlink_ext_ack *extack)
1082 struct switchdev_trans *trans)
1083{ 1082{
1084 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); 1083 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1085 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1084 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
@@ -1095,9 +1094,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1095 mlxsw_sp_port_vlan->bridge_port != bridge_port) 1094 mlxsw_sp_port_vlan->bridge_port != bridge_port)
1096 return -EEXIST; 1095 return -EEXIST;
1097 1096
1098 if (switchdev_trans_ph_prepare(trans))
1099 return 0;
1100
1101 if (!mlxsw_sp_port_vlan) { 1097 if (!mlxsw_sp_port_vlan) {
1102 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 1098 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1103 vid); 1099 vid);
@@ -1188,6 +1184,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1188 return err; 1184 return err;
1189 } 1185 }
1190 1186
1187 if (switchdev_trans_ph_commit(trans))
1188 return 0;
1189
1191 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1190 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1192 if (WARN_ON(!bridge_port)) 1191 if (WARN_ON(!bridge_port))
1193 return -EINVAL; 1192 return -EINVAL;
@@ -1200,7 +1199,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1200 1199
1201 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, 1200 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1202 vid, flag_untagged, 1201 vid, flag_untagged,
1203 flag_pvid, extack, trans); 1202 flag_pvid, extack);
1204 if (err) 1203 if (err)
1205 return err; 1204 return err;
1206 } 1205 }
@@ -1234,7 +1233,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1234static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) 1233static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1235{ 1234{
1236 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : 1235 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1237 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; 1236 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1238} 1237}
1239 1238
1240static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) 1239static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
@@ -1291,7 +1290,7 @@ out:
1291static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1290static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1292 const char *mac, u16 fid, bool adding, 1291 const char *mac, u16 fid, bool adding,
1293 enum mlxsw_reg_sfd_rec_action action, 1292 enum mlxsw_reg_sfd_rec_action action,
1294 bool dynamic) 1293 enum mlxsw_reg_sfd_rec_policy policy)
1295{ 1294{
1296 char *sfd_pl; 1295 char *sfd_pl;
1297 u8 num_rec; 1296 u8 num_rec;
@@ -1302,8 +1301,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1302 return -ENOMEM; 1301 return -ENOMEM;
1303 1302
1304 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1303 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1305 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), 1304 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1306 mac, fid, action, local_port);
1307 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 1305 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1308 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1306 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1309 if (err) 1307 if (err)
@@ -1322,7 +1320,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1322 bool dynamic) 1320 bool dynamic)
1323{ 1321{
1324 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, 1322 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1325 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); 1323 MLXSW_REG_SFD_REC_ACTION_NOP,
1324 mlxsw_sp_sfd_rec_policy(dynamic));
1326} 1325}
1327 1326
1328int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, 1327int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
@@ -1330,7 +1329,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1330{ 1329{
1331 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, 1330 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1332 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, 1331 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1333 false); 1332 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1334} 1333}
1335 1334
1336static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, 1335static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
@@ -1808,7 +1807,7 @@ static void
1808mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, 1807mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1809 struct mlxsw_sp_bridge_port *bridge_port, u16 vid) 1808 struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1810{ 1809{
1811 u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid; 1810 u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1812 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1811 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1813 1812
1814 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1813 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
@@ -3207,7 +3206,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3207 struct mlxsw_sp_bridge_device *bridge_device, 3206 struct mlxsw_sp_bridge_device *bridge_device,
3208 const struct net_device *vxlan_dev, u16 vid, 3207 const struct net_device *vxlan_dev, u16 vid,
3209 bool flag_untagged, bool flag_pvid, 3208 bool flag_untagged, bool flag_pvid,
3210 struct switchdev_trans *trans,
3211 struct netlink_ext_ack *extack) 3209 struct netlink_ext_ack *extack)
3212{ 3210{
3213 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); 3211 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
@@ -3225,9 +3223,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3225 mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) 3223 mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
3226 return -EINVAL; 3224 return -EINVAL;
3227 3225
3228 if (switchdev_trans_ph_prepare(trans))
3229 return 0;
3230
3231 if (!netif_running(vxlan_dev)) 3226 if (!netif_running(vxlan_dev))
3232 return 0; 3227 return 0;
3233 3228
@@ -3345,6 +3340,9 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3345 3340
3346 port_obj_info->handled = true; 3341 port_obj_info->handled = true;
3347 3342
3343 if (switchdev_trans_ph_commit(trans))
3344 return 0;
3345
3348 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 3346 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3349 if (!bridge_device) 3347 if (!bridge_device)
3350 return -EINVAL; 3348 return -EINVAL;
@@ -3358,8 +3356,7 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3358 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, 3356 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3359 vxlan_dev, vid, 3357 vxlan_dev, vid,
3360 flag_untagged, 3358 flag_untagged,
3361 flag_pvid, trans, 3359 flag_pvid, extack);
3362 extack);
3363 if (err) 3360 if (err)
3364 return err; 3361 return err;
3365 } 3362 }
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 20c9377e99cb..310807ef328b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
962 962
963 memset(&ksettings, 0, sizeof(ksettings)); 963 memset(&ksettings, 0, sizeof(ksettings));
964 phy_ethtool_get_link_ksettings(netdev, &ksettings); 964 phy_ethtool_get_link_ksettings(netdev, &ksettings);
965 local_advertisement = phy_read(phydev, MII_ADVERTISE); 965 local_advertisement =
966 if (local_advertisement < 0) 966 linkmode_adv_to_mii_adv_t(phydev->advertising);
967 return; 967 remote_advertisement =
968 968 linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
969 remote_advertisement = phy_read(phydev, MII_LPA);
970 if (remote_advertisement < 0)
971 return;
972 969
973 lan743x_phy_update_flowcontrol(adapter, 970 lan743x_phy_update_flowcontrol(adapter,
974 ksettings.base.duplex, 971 ksettings.base.duplex,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 5f384f73007d..19ce0e605096 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3604,9 +3604,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3604 for (i = 0; i < mgp->num_slices; i++) { 3604 for (i = 0; i < mgp->num_slices; i++) {
3605 ss = &mgp->ss[i]; 3605 ss = &mgp->ss[i];
3606 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); 3606 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3607 ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, 3607 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3608 &ss->rx_done.bus, 3608 &ss->rx_done.bus,
3609 GFP_KERNEL); 3609 GFP_KERNEL);
3610 if (ss->rx_done.entry == NULL) 3610 if (ss->rx_done.entry == NULL)
3611 goto abort; 3611 goto abort;
3612 bytes = sizeof(*ss->fw_stats); 3612 bytes = sizeof(*ss->fw_stats);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e23ca90289f7..0a868c829b90 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1291 1291
1292static int 1292static int
1293wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1293wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1294 enum alu_op alu_op, bool skip) 1294 enum alu_op alu_op)
1295{ 1295{
1296 const struct bpf_insn *insn = &meta->insn; 1296 const struct bpf_insn *insn = &meta->insn;
1297 1297
1298 if (skip) {
1299 meta->skip = true;
1300 return 0;
1301 }
1302
1303 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); 1298 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
1304 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1299 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
1305 1300
@@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2309 2304
2310static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2305static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2311{ 2306{
2312 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); 2307 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
2313} 2308}
2314 2309
2315static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2310static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2319 2314
2320static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2315static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2321{ 2316{
2322 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 2317 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
2323} 2318}
2324 2319
2325static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2320static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2329 2324
2330static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2325static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2331{ 2326{
2332 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 2327 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
2333} 2328}
2334 2329
2335static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2330static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2339 2334
2340static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2335static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2341{ 2336{
2342 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); 2337 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
2343} 2338}
2344 2339
2345static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2340static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2349 2344
2350static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2345static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2351{ 2346{
2352 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); 2347 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
2353} 2348}
2354 2349
2355static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2350static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e97636d2e6ee..7d2d4241498f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2170,9 +2170,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
2170 tx_ring->cnt = dp->txd_cnt; 2170 tx_ring->cnt = dp->txd_cnt;
2171 2171
2172 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); 2172 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
2173 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, 2173 tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
2174 &tx_ring->dma, 2174 &tx_ring->dma,
2175 GFP_KERNEL | __GFP_NOWARN); 2175 GFP_KERNEL | __GFP_NOWARN);
2176 if (!tx_ring->txds) { 2176 if (!tx_ring->txds) {
2177 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2177 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2178 tx_ring->cnt); 2178 tx_ring->cnt);
@@ -2328,9 +2328,9 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
2328 2328
2329 rx_ring->cnt = dp->rxd_cnt; 2329 rx_ring->cnt = dp->rxd_cnt;
2330 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); 2330 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
2331 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, 2331 rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
2332 &rx_ring->dma, 2332 &rx_ring->dma,
2333 GFP_KERNEL | __GFP_NOWARN); 2333 GFP_KERNEL | __GFP_NOWARN);
2334 if (!rx_ring->rxds) { 2334 if (!rx_ring->rxds) {
2335 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2335 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2336 rx_ring->cnt); 2336 rx_ring->cnt);
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 0611f2335b4a..1e408d1a9b5f 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -287,9 +287,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
287 priv->rx_bd_ci = 0; 287 priv->rx_bd_ci = 0;
288 288
289 /* Allocate the Tx and Rx buffer descriptors. */ 289 /* Allocate the Tx and Rx buffer descriptors. */
290 priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 290 priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
291 sizeof(*priv->tx_bd_v) * TX_BD_NUM, 291 sizeof(*priv->tx_bd_v) * TX_BD_NUM,
292 &priv->tx_bd_p, GFP_KERNEL); 292 &priv->tx_bd_p, GFP_KERNEL);
293 if (!priv->tx_bd_v) 293 if (!priv->tx_bd_v)
294 goto out; 294 goto out;
295 295
@@ -299,9 +299,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
299 if (!priv->tx_skb) 299 if (!priv->tx_skb)
300 goto out; 300 goto out;
301 301
302 priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 302 priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
303 sizeof(*priv->rx_bd_v) * RX_BD_NUM, 303 sizeof(*priv->rx_bd_v) * RX_BD_NUM,
304 &priv->rx_bd_p, GFP_KERNEL); 304 &priv->rx_bd_p, GFP_KERNEL);
305 if (!priv->rx_bd_v) 305 if (!priv->rx_bd_v)
306 goto out; 306 goto out;
307 307
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 43c0c10dfeb7..552d930e3940 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1440,8 +1440,8 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1440 1440
1441 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; 1441 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1442 rx_ring->rx_buff_pool = 1442 rx_ring->rx_buff_pool =
1443 dma_zalloc_coherent(&pdev->dev, size, 1443 dma_alloc_coherent(&pdev->dev, size,
1444 &rx_ring->rx_buff_pool_logic, GFP_KERNEL); 1444 &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
1445 if (!rx_ring->rx_buff_pool) 1445 if (!rx_ring->rx_buff_pool)
1446 return -ENOMEM; 1446 return -ENOMEM;
1447 1447
@@ -1755,8 +1755,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1755 1755
1756 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1756 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1757 1757
1758 tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size, 1758 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1759 &tx_ring->dma, GFP_KERNEL); 1759 &tx_ring->dma, GFP_KERNEL);
1760 if (!tx_ring->desc) { 1760 if (!tx_ring->desc) {
1761 vfree(tx_ring->buffer_info); 1761 vfree(tx_ring->buffer_info);
1762 return -ENOMEM; 1762 return -ENOMEM;
@@ -1798,8 +1798,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1798 return -ENOMEM; 1798 return -ENOMEM;
1799 1799
1800 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1800 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1801 rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size, 1801 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1802 &rx_ring->dma, GFP_KERNEL); 1802 &rx_ring->dma, GFP_KERNEL);
1803 if (!rx_ring->desc) { 1803 if (!rx_ring->desc) {
1804 vfree(rx_ring->buffer_info); 1804 vfree(rx_ring->buffer_info);
1805 return -ENOMEM; 1805 return -ENOMEM;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 8a31a02c9f47..d21041554507 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -401,9 +401,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
401 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) 401 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
402 goto out_ring_desc; 402 goto out_ring_desc;
403 403
404 ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev, 404 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
405 RX_RING_SIZE * sizeof(u64), 405 RX_RING_SIZE * sizeof(u64),
406 &ring->buf_dma, GFP_KERNEL); 406 &ring->buf_dma, GFP_KERNEL);
407 if (!ring->buffers) 407 if (!ring->buffers)
408 goto out_ring_desc; 408 goto out_ring_desc;
409 409
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 24a90163775e..2d8a77cc156b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -53,7 +53,7 @@
53extern const struct qed_common_ops qed_common_ops_pass; 53extern const struct qed_common_ops qed_common_ops_pass;
54 54
55#define QED_MAJOR_VERSION 8 55#define QED_MAJOR_VERSION 8
56#define QED_MINOR_VERSION 33 56#define QED_MINOR_VERSION 37
57#define QED_REVISION_VERSION 0 57#define QED_REVISION_VERSION 0
58#define QED_ENGINEERING_VERSION 20 58#define QED_ENGINEERING_VERSION 20
59 59
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index dc1c1b616084..c2ad405b2f50 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
936 u32 size = min_t(u32, total_size, psz); 936 u32 size = min_t(u32, total_size, psz);
937 void **p_virt = &p_mngr->t2[i].p_virt; 937 void **p_virt = &p_mngr->t2[i].p_virt;
938 938
939 *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, 939 *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
940 size, &p_mngr->t2[i].p_phys, 940 &p_mngr->t2[i].p_phys,
941 GFP_KERNEL); 941 GFP_KERNEL);
942 if (!p_mngr->t2[i].p_virt) { 942 if (!p_mngr->t2[i].p_virt) {
943 rc = -ENOMEM; 943 rc = -ENOMEM;
944 goto t2_fail; 944 goto t2_fail;
@@ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
1054 u32 size; 1054 u32 size;
1055 1055
1056 size = min_t(u32, sz_left, p_blk->real_size_in_page); 1056 size = min_t(u32, sz_left, p_blk->real_size_in_page);
1057 p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, 1057 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
1058 &p_phys, GFP_KERNEL); 1058 &p_phys, GFP_KERNEL);
1059 if (!p_virt) 1059 if (!p_virt)
1060 return -ENOMEM; 1060 return -ENOMEM;
1061 1061
@@ -2306,9 +2306,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2306 goto out0; 2306 goto out0;
2307 } 2307 }
2308 2308
2309 p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, 2309 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2310 p_blk->real_size_in_page, &p_phys, 2310 p_blk->real_size_in_page, &p_phys,
2311 GFP_KERNEL); 2311 GFP_KERNEL);
2312 if (!p_virt) { 2312 if (!p_virt) {
2313 rc = -ENOMEM; 2313 rc = -ENOMEM;
2314 goto out1; 2314 goto out1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 8f6551421945..2ecaaaa4469a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -795,19 +795,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
795 795
796/* get pq index according to PQ_FLAGS */ 796/* get pq index according to PQ_FLAGS */
797static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, 797static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
798 u32 pq_flags) 798 unsigned long pq_flags)
799{ 799{
800 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 800 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
801 801
802 /* Can't have multiple flags set here */ 802 /* Can't have multiple flags set here */
803 if (bitmap_weight((unsigned long *)&pq_flags, 803 if (bitmap_weight(&pq_flags,
804 sizeof(pq_flags) * BITS_PER_BYTE) > 1) { 804 sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
805 DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); 805 DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
806 goto err; 806 goto err;
807 } 807 }
808 808
809 if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { 809 if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
810 DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); 810 DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
811 goto err; 811 goto err;
812 } 812 }
813 813
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index beb8e5d6401a..ded556b7bab5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1688 1688
1689 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); 1689 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1690 1690
1691 if (!ether_addr_equal(ethh->h_dest,
1692 p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1693 DP_VERBOSE(p_hwfn,
1694 QED_MSG_RDMA,
1695 "Got unexpected mac %pM instead of %pM\n",
1696 ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
1697 return -EINVAL;
1698 }
1699
1691 ether_addr_copy(remote_mac_addr, ethh->h_source); 1700 ether_addr_copy(remote_mac_addr, ethh->h_source);
1692 ether_addr_copy(local_mac_addr, ethh->h_dest); 1701 ether_addr_copy(local_mac_addr, ethh->h_dest);
1693 1702
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2605 struct qed_iwarp_info *iwarp_info; 2614 struct qed_iwarp_info *iwarp_info;
2606 struct qed_ll2_acquire_data data; 2615 struct qed_ll2_acquire_data data;
2607 struct qed_ll2_cbs cbs; 2616 struct qed_ll2_cbs cbs;
2608 u32 mpa_buff_size; 2617 u32 buff_size;
2609 u16 n_ooo_bufs; 2618 u16 n_ooo_bufs;
2610 int rc = 0; 2619 int rc = 0;
2611 int i; 2620 int i;
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2632 2641
2633 memset(&data, 0, sizeof(data)); 2642 memset(&data, 0, sizeof(data));
2634 data.input.conn_type = QED_LL2_TYPE_IWARP; 2643 data.input.conn_type = QED_LL2_TYPE_IWARP;
2635 data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; 2644 data.input.mtu = params->max_mtu;
2636 data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; 2645 data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2637 data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; 2646 data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2638 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ 2647 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2654 goto err; 2663 goto err;
2655 } 2664 }
2656 2665
2666 buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2657 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, 2667 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2658 QED_IWARP_LL2_SYN_RX_SIZE, 2668 QED_IWARP_LL2_SYN_RX_SIZE,
2659 QED_IWARP_MAX_SYN_PKT_SIZE, 2669 buff_size,
2660 iwarp_info->ll2_syn_handle); 2670 iwarp_info->ll2_syn_handle);
2661 if (rc) 2671 if (rc)
2662 goto err; 2672 goto err;
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2710 if (rc) 2720 if (rc)
2711 goto err; 2721 goto err;
2712 2722
2713 mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2714 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, 2723 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2715 data.input.rx_num_desc, 2724 data.input.rx_num_desc,
2716 mpa_buff_size, 2725 buff_size,
2717 iwarp_info->ll2_mpa_handle); 2726 iwarp_info->ll2_mpa_handle);
2718 if (rc) 2727 if (rc)
2719 goto err; 2728 goto err;
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2726 2735
2727 iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; 2736 iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2728 2737
2729 iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); 2738 iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
2730 if (!iwarp_info->mpa_intermediate_buf) 2739 if (!iwarp_info->mpa_intermediate_buf)
2731 goto err; 2740 goto err;
2732 2741
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index b8f612d00241..7ac959038324 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
46 46
47#define QED_IWARP_LL2_SYN_TX_SIZE (128) 47#define QED_IWARP_LL2_SYN_TX_SIZE (128)
48#define QED_IWARP_LL2_SYN_RX_SIZE (256) 48#define QED_IWARP_LL2_SYN_RX_SIZE (256)
49#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
50 49
51#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) 50#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256)
52#define QED_IWARP_MAX_OOO (16) 51#define QED_IWARP_MAX_OOO (16)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 67c02ea93906..58be1c4c6668 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
609 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && 609 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
610 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); 610 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
611 611
612 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
613 (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
614 !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
615
612 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, 616 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
613 !!(accept_filter & QED_ACCEPT_BCAST)); 617 !!(accept_filter & QED_ACCEPT_BCAST));
614 618
@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
744 return rc; 748 return rc;
745 } 749 }
746 750
751 if (p_params->update_ctl_frame_check) {
752 p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
753 p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
754 }
755
747 /* Update mcast bins for VFs, PF doesn't use this functionality */ 756 /* Update mcast bins for VFs, PF doesn't use this functionality */
748 qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); 757 qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
749 758
@@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2207 u16 num_queues = 0; 2216 u16 num_queues = 0;
2208 2217
2209 /* Since the feature controls only queue-zones, 2218 /* Since the feature controls only queue-zones,
2210 * make sure we have the contexts [rx, tx, xdp] to 2219 * make sure we have the contexts [rx, xdp, tcs] to
2211 * match. 2220 * match.
2212 */ 2221 */
2213 for_each_hwfn(cdev, i) { 2222 for_each_hwfn(cdev, i) {
@@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2217 u16 cids; 2226 u16 cids;
2218 2227
2219 cids = hwfn->pf_params.eth_pf_params.num_cons; 2228 cids = hwfn->pf_params.eth_pf_params.num_cons;
2220 num_queues += min_t(u16, l2_queues, cids / 3); 2229 cids /= (2 + info->num_tc);
2230 num_queues += min_t(u16, l2_queues, cids);
2221 } 2231 }
2222 2232
2223 /* queues might theoretically be >256, but interrupts' 2233 /* queues might theoretically be >256, but interrupts'
@@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2688 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 2698 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2689 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | 2699 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2690 QED_ACCEPT_MCAST_UNMATCHED; 2700 QED_ACCEPT_MCAST_UNMATCHED;
2691 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2701 accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2702 QED_ACCEPT_MCAST_UNMATCHED;
2692 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 2703 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2693 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2704 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2694 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2705 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
@@ -2860,7 +2871,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
2860 p_hwfn = p_cid->p_owner; 2871 p_hwfn = p_cid->p_owner;
2861 rc = qed_get_queue_coalesce(p_hwfn, coal, handle); 2872 rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
2862 if (rc) 2873 if (rc)
2863 DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); 2874 DP_VERBOSE(cdev, QED_MSG_DEBUG,
2875 "Unable to read queue coalescing\n");
2864 2876
2865 return rc; 2877 return rc;
2866} 2878}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8d80f1095d17..7127d5aaac42 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
219 struct qed_rss_params *rss_params; 219 struct qed_rss_params *rss_params;
220 struct qed_filter_accept_flags accept_flags; 220 struct qed_filter_accept_flags accept_flags;
221 struct qed_sge_tpa_params *sge_tpa_params; 221 struct qed_sge_tpa_params *sge_tpa_params;
222 u8 update_ctl_frame_check;
223 u8 mac_chk_en;
224 u8 ethtype_chk_en;
222}; 225};
223 226
224int qed_sp_vport_update(struct qed_hwfn *p_hwfn, 227int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 90afd514ffe1..b5f419b71287 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1619,6 +1619,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1619 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); 1619 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1620 rx_prod.bd_prod = cpu_to_le16(bd_prod); 1620 rx_prod.bd_prod = cpu_to_le16(bd_prod);
1621 rx_prod.cqe_prod = cpu_to_le16(cq_prod); 1621 rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1622
1623 /* Make sure chain element is updated before ringing the doorbell */
1624 dma_wmb();
1625
1622 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); 1626 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1623} 1627}
1624 1628
@@ -2447,19 +2451,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2447{ 2451{
2448 struct qed_ll2_tx_pkt_info pkt; 2452 struct qed_ll2_tx_pkt_info pkt;
2449 const skb_frag_t *frag; 2453 const skb_frag_t *frag;
2454 u8 flags = 0, nr_frags;
2450 int rc = -EINVAL, i; 2455 int rc = -EINVAL, i;
2451 dma_addr_t mapping; 2456 dma_addr_t mapping;
2452 u16 vlan = 0; 2457 u16 vlan = 0;
2453 u8 flags = 0;
2454 2458
2455 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { 2459 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2456 DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); 2460 DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2457 return -EINVAL; 2461 return -EINVAL;
2458 } 2462 }
2459 2463
2460 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { 2464 /* Cache number of fragments from SKB since SKB may be freed by
2465 * the completion routine after calling qed_ll2_prepare_tx_packet()
2466 */
2467 nr_frags = skb_shinfo(skb)->nr_frags;
2468
2469 if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2461 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", 2470 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2462 1 + skb_shinfo(skb)->nr_frags); 2471 1 + nr_frags);
2463 return -EINVAL; 2472 return -EINVAL;
2464 } 2473 }
2465 2474
@@ -2481,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2481 } 2490 }
2482 2491
2483 memset(&pkt, 0, sizeof(pkt)); 2492 memset(&pkt, 0, sizeof(pkt));
2484 pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; 2493 pkt.num_of_bds = 1 + nr_frags;
2485 pkt.vlan = vlan; 2494 pkt.vlan = vlan;
2486 pkt.bd_flags = flags; 2495 pkt.bd_flags = flags;
2487 pkt.tx_dest = QED_LL2_TX_DEST_NW; 2496 pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2492,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2492 test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) 2501 test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
2493 pkt.remove_stag = true; 2502 pkt.remove_stag = true;
2494 2503
2504 /* qed_ll2_prepare_tx_packet() may actually send the packet if
2505 * there are no fragments in the skb and subsequently the completion
2506 * routine may run and free the SKB, so no dereferencing the SKB
2507 * beyond this point unless skb has any fragments.
2508 */
2495 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, 2509 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2496 &pkt, 1); 2510 &pkt, 1);
2497 if (rc) 2511 if (rc)
2498 goto err; 2512 goto err;
2499 2513
2500 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2514 for (i = 0; i < nr_frags; i++) {
2501 frag = &skb_shinfo(skb)->frags[i]; 2515 frag = &skb_shinfo(skb)->frags[i];
2502 2516
2503 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, 2517 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 4179c9013fc6..96ab77ae6af5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
382 * @param p_hwfn 382 * @param p_hwfn
383 */ 383 */
384void qed_consq_free(struct qed_hwfn *p_hwfn); 384void qed_consq_free(struct qed_hwfn *p_hwfn);
385int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
385 386
386/** 387/**
387 * @file 388 * @file
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 888274fa208b..5a495fda9e9d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
604 604
605 p_ent->ramrod.pf_update.update_mf_vlan_flag = true; 605 p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
606 p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); 606 p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
607 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
608 p_ent->ramrod.pf_update.mf_vlan |=
609 cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
607 610
608 return qed_spq_post(p_hwfn, p_ent, NULL); 611 return qed_spq_post(p_hwfn, p_ent, NULL);
609} 612}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index eb88bbc6b193..ba64ff9bedbd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
397 397
398 qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); 398 qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
399 399
400 /* Attempt to post pending requests */
401 spin_lock_bh(&p_hwfn->p_spq->lock);
402 rc = qed_spq_pend_post(p_hwfn);
403 spin_unlock_bh(&p_hwfn->p_spq->lock);
404
400 return rc; 405 return rc;
401} 406}
402 407
@@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
767 return 0; 772 return 0;
768} 773}
769 774
770static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) 775int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
771{ 776{
772 struct qed_spq *p_spq = p_hwfn->p_spq; 777 struct qed_spq *p_spq = p_hwfn->p_spq;
773 struct qed_spq_entry *p_ent = NULL; 778 struct qed_spq_entry *p_ent = NULL;
@@ -905,7 +910,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
905 struct qed_spq_entry *p_ent = NULL; 910 struct qed_spq_entry *p_ent = NULL;
906 struct qed_spq_entry *tmp; 911 struct qed_spq_entry *tmp;
907 struct qed_spq_entry *found = NULL; 912 struct qed_spq_entry *found = NULL;
908 int rc;
909 913
910 if (!p_hwfn) 914 if (!p_hwfn)
911 return -EINVAL; 915 return -EINVAL;
@@ -963,12 +967,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
963 */ 967 */
964 qed_spq_return_entry(p_hwfn, found); 968 qed_spq_return_entry(p_hwfn, found);
965 969
966 /* Attempt to post pending requests */ 970 return 0;
967 spin_lock_bh(&p_spq->lock);
968 rc = qed_spq_pend_post(p_hwfn);
969 spin_unlock_bh(&p_spq->lock);
970
971 return rc;
972} 971}
973 972
974int qed_consq_alloc(struct qed_hwfn *p_hwfn) 973int qed_consq_alloc(struct qed_hwfn *p_hwfn)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ca6290fa0f30..71a7af134dd8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1969 params.vport_id = vf->vport_id; 1969 params.vport_id = vf->vport_id;
1970 params.max_buffers_per_cqe = start->max_buffers_per_cqe; 1970 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1971 params.mtu = vf->mtu; 1971 params.mtu = vf->mtu;
1972 params.check_mac = true; 1972
1973 /* Non trusted VFs should enable control frame filtering */
1974 params.check_mac = !vf->p_vf_info.is_trusted_configured;
1973 1975
1974 rc = qed_sp_eth_vport_start(p_hwfn, &params); 1976 rc = qed_sp_eth_vport_start(p_hwfn, &params);
1975 if (rc) { 1977 if (rc) {
@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5130 params.opaque_fid = vf->opaque_fid; 5132 params.opaque_fid = vf->opaque_fid;
5131 params.vport_id = vf->vport_id; 5133 params.vport_id = vf->vport_id;
5132 5134
5135 params.update_ctl_frame_check = 1;
5136 params.mac_chk_en = !vf_info->is_trusted_configured;
5137
5133 if (vf_info->rx_accept_mode & mask) { 5138 if (vf_info->rx_accept_mode & mask) {
5134 flags->update_rx_mode_config = 1; 5139 flags->update_rx_mode_config = 1;
5135 flags->rx_accept_filter = vf_info->rx_accept_mode; 5140 flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5147 } 5152 }
5148 5153
5149 if (flags->update_rx_mode_config || 5154 if (flags->update_rx_mode_config ||
5150 flags->update_tx_mode_config) 5155 flags->update_tx_mode_config ||
5156 params.update_ctl_frame_check)
5151 qed_sp_vport_update(hwfn, &params, 5157 qed_sp_vport_update(hwfn, &params,
5152 QED_SPQ_MODE_EBLOCK, NULL); 5158 QED_SPQ_MODE_EBLOCK, NULL);
5153 } 5159 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index b6cccf44bf40..5dda547772c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
261 struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; 261 struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
262 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 262 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
263 struct vf_pf_resc_request *p_resc; 263 struct vf_pf_resc_request *p_resc;
264 u8 retry_cnt = VF_ACQUIRE_THRESH;
264 bool resources_acquired = false; 265 bool resources_acquired = false;
265 struct vfpf_acquire_tlv *req; 266 struct vfpf_acquire_tlv *req;
266 int rc = 0, attempts = 0; 267 int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
314 315
315 /* send acquire request */ 316 /* send acquire request */
316 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 317 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
318
319 /* Re-try acquire in case of vf-pf hw channel timeout */
320 if (retry_cnt && rc == -EBUSY) {
321 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
322 "VF retrying to acquire due to VPC timeout\n");
323 retry_cnt--;
324 continue;
325 }
326
317 if (rc) 327 if (rc)
318 goto exit; 328 goto exit;
319 329
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 613249d1e967..730997b13747 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -56,7 +56,7 @@
56#include <net/tc_act/tc_gact.h> 56#include <net/tc_act/tc_gact.h>
57 57
58#define QEDE_MAJOR_VERSION 8 58#define QEDE_MAJOR_VERSION 8
59#define QEDE_MINOR_VERSION 33 59#define QEDE_MINOR_VERSION 37
60#define QEDE_REVISION_VERSION 0 60#define QEDE_REVISION_VERSION 0
61#define QEDE_ENGINEERING_VERSION 20 61#define QEDE_ENGINEERING_VERSION 20
62#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ 62#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -494,6 +494,9 @@ struct qede_reload_args {
494 494
495/* Datapath functions definition */ 495/* Datapath functions definition */
496netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); 496netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
497u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
498 struct net_device *sb_dev,
499 select_queue_fallback_t fallback);
497netdev_features_t qede_features_check(struct sk_buff *skb, 500netdev_features_t qede_features_check(struct sk_buff *skb,
498 struct net_device *dev, 501 struct net_device *dev,
499 netdev_features_t features); 502 netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index bdf816fe5a16..31b046e24565 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1695 return NETDEV_TX_OK; 1695 return NETDEV_TX_OK;
1696} 1696}
1697 1697
1698u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
1699 struct net_device *sb_dev,
1700 select_queue_fallback_t fallback)
1701{
1702 struct qede_dev *edev = netdev_priv(dev);
1703 int total_txq;
1704
1705 total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
1706
1707 return QEDE_TSS_COUNT(edev) ?
1708 fallback(dev, skb, NULL) % total_txq : 0;
1709}
1710
1698/* 8B udp header + 8B base tunnel header + 32B option length */ 1711/* 8B udp header + 8B base tunnel header + 32B option length */
1699#define QEDE_MAX_TUN_HDR_LEN 48 1712#define QEDE_MAX_TUN_HDR_LEN 48
1700 1713
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 5a74fcbdbc2b..9790f26d17c4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = {
631 .ndo_open = qede_open, 631 .ndo_open = qede_open,
632 .ndo_stop = qede_close, 632 .ndo_stop = qede_close,
633 .ndo_start_xmit = qede_start_xmit, 633 .ndo_start_xmit = qede_start_xmit,
634 .ndo_select_queue = qede_select_queue,
634 .ndo_set_rx_mode = qede_set_rx_mode, 635 .ndo_set_rx_mode = qede_set_rx_mode,
635 .ndo_set_mac_address = qede_set_mac_addr, 636 .ndo_set_mac_address = qede_set_mac_addr,
636 .ndo_validate_addr = eth_validate_addr, 637 .ndo_validate_addr = eth_validate_addr,
@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
666 .ndo_open = qede_open, 667 .ndo_open = qede_open,
667 .ndo_stop = qede_close, 668 .ndo_stop = qede_close,
668 .ndo_start_xmit = qede_start_xmit, 669 .ndo_start_xmit = qede_start_xmit,
670 .ndo_select_queue = qede_select_queue,
669 .ndo_set_rx_mode = qede_set_rx_mode, 671 .ndo_set_rx_mode = qede_set_rx_mode,
670 .ndo_set_mac_address = qede_set_mac_addr, 672 .ndo_set_mac_address = qede_set_mac_addr,
671 .ndo_validate_addr = eth_validate_addr, 673 .ndo_validate_addr = eth_validate_addr,
@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
684 .ndo_open = qede_open, 686 .ndo_open = qede_open,
685 .ndo_stop = qede_close, 687 .ndo_stop = qede_close,
686 .ndo_start_xmit = qede_start_xmit, 688 .ndo_start_xmit = qede_start_xmit,
689 .ndo_select_queue = qede_select_queue,
687 .ndo_set_rx_mode = qede_set_rx_mode, 690 .ndo_set_rx_mode = qede_set_rx_mode,
688 .ndo_set_mac_address = qede_set_mac_addr, 691 .ndo_set_mac_address = qede_set_mac_addr,
689 .ndo_validate_addr = eth_validate_addr, 692 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index d344e9d43832..af38d3d73291 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -434,14 +434,14 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
434 *(tx_ring->hw_consumer) = 0; 434 *(tx_ring->hw_consumer) = 0;
435 435
436 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 436 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
437 rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size, 437 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
438 &rq_phys_addr, GFP_KERNEL); 438 &rq_phys_addr, GFP_KERNEL);
439 if (!rq_addr) 439 if (!rq_addr)
440 return -ENOMEM; 440 return -ENOMEM;
441 441
442 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 442 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
443 rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size, 443 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
444 &rsp_phys_addr, GFP_KERNEL); 444 &rsp_phys_addr, GFP_KERNEL);
445 if (!rsp_addr) { 445 if (!rsp_addr) {
446 err = -ENOMEM; 446 err = -ENOMEM;
447 goto out_free_rq; 447 goto out_free_rq;
@@ -855,8 +855,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
855 struct qlcnic_cmd_args cmd; 855 struct qlcnic_cmd_args cmd;
856 size_t nic_size = sizeof(struct qlcnic_info_le); 856 size_t nic_size = sizeof(struct qlcnic_info_le);
857 857
858 nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, 858 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
859 &nic_dma_t, GFP_KERNEL); 859 &nic_dma_t, GFP_KERNEL);
860 if (!nic_info_addr) 860 if (!nic_info_addr)
861 return -ENOMEM; 861 return -ENOMEM;
862 862
@@ -909,8 +909,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
909 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 909 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
910 return err; 910 return err;
911 911
912 nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, 912 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
913 &nic_dma_t, GFP_KERNEL); 913 &nic_dma_t, GFP_KERNEL);
914 if (!nic_info_addr) 914 if (!nic_info_addr)
915 return -ENOMEM; 915 return -ENOMEM;
916 916
@@ -964,8 +964,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
964 void *pci_info_addr; 964 void *pci_info_addr;
965 int err = 0, i; 965 int err = 0, i;
966 966
967 pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size, 967 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
968 &pci_info_dma_t, GFP_KERNEL); 968 &pci_info_dma_t, GFP_KERNEL);
969 if (!pci_info_addr) 969 if (!pci_info_addr)
970 return -ENOMEM; 970 return -ENOMEM;
971 971
@@ -1078,8 +1078,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
1078 return -EIO; 1078 return -EIO;
1079 } 1079 }
1080 1080
1081 stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, 1081 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
1082 &stats_dma_t, GFP_KERNEL); 1082 &stats_dma_t, GFP_KERNEL);
1083 if (!stats_addr) 1083 if (!stats_addr)
1084 return -ENOMEM; 1084 return -ENOMEM;
1085 1085
@@ -1134,8 +1134,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
1134 if (mac_stats == NULL) 1134 if (mac_stats == NULL)
1135 return -ENOMEM; 1135 return -ENOMEM;
1136 1136
1137 stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, 1137 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
1138 &stats_dma_t, GFP_KERNEL); 1138 &stats_dma_t, GFP_KERNEL);
1139 if (!stats_addr) 1139 if (!stats_addr)
1140 return -ENOMEM; 1140 return -ENOMEM;
1141 1141
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 031f6e6ee9c1..8d790313ee3d 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -776,7 +776,7 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
776 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ 776 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */
777 777
778 ring_header->used = 0; 778 ring_header->used = 0;
779 ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, 779 ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
780 &ring_header->dma_addr, 780 &ring_header->dma_addr,
781 GFP_KERNEL); 781 GFP_KERNEL);
782 if (!ring_header->v_addr) 782 if (!ring_header->v_addr)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 44f6e4873aad..4f910c4f67b0 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -691,7 +691,7 @@ static void cp_tx (struct cp_private *cp)
691 } 691 }
692 bytes_compl += skb->len; 692 bytes_compl += skb->len;
693 pkts_compl++; 693 pkts_compl++;
694 dev_kfree_skb_irq(skb); 694 dev_consume_skb_irq(skb);
695 } 695 }
696 696
697 cp->tx_skb[tx_tail] = NULL; 697 cp->tx_skb[tx_tail] = NULL;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 298930d39b79..6e36b88ca7c9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -205,6 +205,8 @@ enum cfg_version {
205}; 205};
206 206
207static const struct pci_device_id rtl8169_pci_tbl[] = { 207static const struct pci_device_id rtl8169_pci_tbl[] = {
208 { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
209 { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
208 { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 }, 210 { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 },
209 { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 }, 211 { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 },
210 { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 }, 212 { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 },
@@ -706,6 +708,7 @@ module_param(use_dac, int, 0);
706MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); 708MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
707module_param_named(debug, debug.msg_enable, int, 0); 709module_param_named(debug, debug.msg_enable, int, 0);
708MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); 710MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
711MODULE_SOFTDEP("pre: realtek");
709MODULE_LICENSE("GPL"); 712MODULE_LICENSE("GPL");
710MODULE_FIRMWARE(FIRMWARE_8168D_1); 713MODULE_FIRMWARE(FIRMWARE_8168D_1);
711MODULE_FIRMWARE(FIRMWARE_8168D_2); 714MODULE_FIRMWARE(FIRMWARE_8168D_2);
@@ -1283,11 +1286,13 @@ static u16 rtl_get_events(struct rtl8169_private *tp)
1283static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) 1286static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1284{ 1287{
1285 RTL_W16(tp, IntrStatus, bits); 1288 RTL_W16(tp, IntrStatus, bits);
1289 mmiowb();
1286} 1290}
1287 1291
1288static void rtl_irq_disable(struct rtl8169_private *tp) 1292static void rtl_irq_disable(struct rtl8169_private *tp)
1289{ 1293{
1290 RTL_W16(tp, IntrMask, 0); 1294 RTL_W16(tp, IntrMask, 0);
1295 mmiowb();
1291} 1296}
1292 1297
1293#define RTL_EVENT_NAPI_RX (RxOK | RxErr) 1298#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
@@ -1679,11 +1684,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp)
1679 1684
1680static bool rtl8169_update_counters(struct rtl8169_private *tp) 1685static bool rtl8169_update_counters(struct rtl8169_private *tp)
1681{ 1686{
1687 u8 val = RTL_R8(tp, ChipCmd);
1688
1682 /* 1689 /*
1683 * Some chips are unable to dump tally counters when the receiver 1690 * Some chips are unable to dump tally counters when the receiver
1684 * is disabled. 1691 * is disabled. If 0xff chip may be in a PCI power-save state.
1685 */ 1692 */
1686 if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) 1693 if (!(val & CmdRxEnb) || val == 0xff)
1687 return true; 1694 return true;
1688 1695
1689 return rtl8169_do_counters(tp, CounterDump); 1696 return rtl8169_do_counters(tp, CounterDump);
@@ -6067,7 +6074,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6067 struct device *d = tp_to_dev(tp); 6074 struct device *d = tp_to_dev(tp);
6068 dma_addr_t mapping; 6075 dma_addr_t mapping;
6069 u32 opts[2], len; 6076 u32 opts[2], len;
6070 bool stop_queue;
6071 int frags; 6077 int frags;
6072 6078
6073 if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { 6079 if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -6109,6 +6115,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6109 6115
6110 txd->opts2 = cpu_to_le32(opts[1]); 6116 txd->opts2 = cpu_to_le32(opts[1]);
6111 6117
6118 netdev_sent_queue(dev, skb->len);
6119
6112 skb_tx_timestamp(skb); 6120 skb_tx_timestamp(skb);
6113 6121
6114 /* Force memory writes to complete before releasing descriptor */ 6122 /* Force memory writes to complete before releasing descriptor */
@@ -6121,14 +6129,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6121 6129
6122 tp->cur_tx += frags + 1; 6130 tp->cur_tx += frags + 1;
6123 6131
6124 stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); 6132 RTL_W8(tp, TxPoll, NPQ);
6125 if (unlikely(stop_queue))
6126 netif_stop_queue(dev);
6127 6133
6128 if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) 6134 mmiowb();
6129 RTL_W8(tp, TxPoll, NPQ);
6130 6135
6131 if (unlikely(stop_queue)) { 6136 if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
6137 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
6138 * not miss a ring update when it notices a stopped queue.
6139 */
6140 smp_wmb();
6141 netif_stop_queue(dev);
6132 /* Sync with rtl_tx: 6142 /* Sync with rtl_tx:
6133 * - publish queue status and cur_tx ring index (write barrier) 6143 * - publish queue status and cur_tx ring index (write barrier)
6134 * - refresh dirty_tx ring index (read barrier). 6144 * - refresh dirty_tx ring index (read barrier).
@@ -6478,7 +6488,9 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
6478 6488
6479 if (work_done < budget) { 6489 if (work_done < budget) {
6480 napi_complete_done(napi, work_done); 6490 napi_complete_done(napi, work_done);
6491
6481 rtl_irq_enable(tp); 6492 rtl_irq_enable(tp);
6493 mmiowb();
6482 } 6494 }
6483 6495
6484 return work_done; 6496 return work_done;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ffc1ada4e6da..d28c8f9ca55b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -343,7 +343,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
343 int i; 343 int i;
344 344
345 priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) + 345 priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
346 ETH_HLEN + VLAN_HLEN; 346 ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
347 347
348 /* Allocate RX and TX skb rings */ 348 /* Allocate RX and TX skb rings */
349 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], 349 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -524,13 +524,15 @@ static void ravb_rx_csum(struct sk_buff *skb)
524{ 524{
525 u8 *hw_csum; 525 u8 *hw_csum;
526 526
527 /* The hardware checksum is 2 bytes appended to packet data */ 527 /* The hardware checksum is contained in sizeof(__sum16) (2) bytes
528 if (unlikely(skb->len < 2)) 528 * appended to packet data
529 */
530 if (unlikely(skb->len < sizeof(__sum16)))
529 return; 531 return;
530 hw_csum = skb_tail_pointer(skb) - 2; 532 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
531 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); 533 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
532 skb->ip_summed = CHECKSUM_COMPLETE; 534 skb->ip_summed = CHECKSUM_COMPLETE;
533 skb_trim(skb, skb->len - 2); 535 skb_trim(skb, skb->len - sizeof(__sum16));
534} 536}
535 537
536/* Packet receive function for Ethernet AVB */ 538/* Packet receive function for Ethernet AVB */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 690aee88f0eb..6d22dd500790 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -400,9 +400,9 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
400 } 400 }
401 401
402 /* allocate memory for TX descriptors */ 402 /* allocate memory for TX descriptors */
403 tx_ring->dma_tx = dma_zalloc_coherent(dev, 403 tx_ring->dma_tx = dma_alloc_coherent(dev,
404 tx_rsize * sizeof(struct sxgbe_tx_norm_desc), 404 tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
405 &tx_ring->dma_tx_phy, GFP_KERNEL); 405 &tx_ring->dma_tx_phy, GFP_KERNEL);
406 if (!tx_ring->dma_tx) 406 if (!tx_ring->dma_tx)
407 return -ENOMEM; 407 return -ENOMEM;
408 408
@@ -479,9 +479,9 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
479 rx_ring->queue_no = queue_no; 479 rx_ring->queue_no = queue_no;
480 480
481 /* allocate memory for RX descriptors */ 481 /* allocate memory for RX descriptors */
482 rx_ring->dma_rx = dma_zalloc_coherent(priv->device, 482 rx_ring->dma_rx = dma_alloc_coherent(priv->device,
483 rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 483 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
484 &rx_ring->dma_rx_phy, GFP_KERNEL); 484 &rx_ring->dma_rx_phy, GFP_KERNEL);
485 485
486 if (rx_ring->dma_rx == NULL) 486 if (rx_ring->dma_rx == NULL)
487 return -ENOMEM; 487 return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index b6a50058bb8d..c08034154a9a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6046,22 +6046,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
6046 { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, 6046 { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
6047 { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" } 6047 { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }
6048}; 6048};
6049#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types)
6049 6050
6050static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, 6051static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6051 struct efx_mcdi_mtd_partition *part, 6052 struct efx_mcdi_mtd_partition *part,
6052 unsigned int type) 6053 unsigned int type,
6054 unsigned long *found)
6053{ 6055{
6054 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); 6056 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
6055 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); 6057 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
6056 const struct efx_ef10_nvram_type_info *info; 6058 const struct efx_ef10_nvram_type_info *info;
6057 size_t size, erase_size, outlen; 6059 size_t size, erase_size, outlen;
6060 int type_idx = 0;
6058 bool protected; 6061 bool protected;
6059 int rc; 6062 int rc;
6060 6063
6061 for (info = efx_ef10_nvram_types; ; info++) { 6064 for (type_idx = 0; ; type_idx++) {
6062 if (info == 6065 if (type_idx == EF10_NVRAM_PARTITION_COUNT)
6063 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
6064 return -ENODEV; 6066 return -ENODEV;
6067 info = efx_ef10_nvram_types + type_idx;
6065 if ((type & ~info->type_mask) == info->type) 6068 if ((type & ~info->type_mask) == info->type)
6066 break; 6069 break;
6067 } 6070 }
@@ -6074,6 +6077,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6074 if (protected) 6077 if (protected)
6075 return -ENODEV; /* hide it */ 6078 return -ENODEV; /* hide it */
6076 6079
6080 /* If we've already exposed a partition of this type, hide this
6081 * duplicate. All operations on MTDs are keyed by the type anyway,
6082 * so we can't act on the duplicate.
6083 */
6084 if (__test_and_set_bit(type_idx, found))
6085 return -EEXIST;
6086
6077 part->nvram_type = type; 6087 part->nvram_type = type;
6078 6088
6079 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); 6089 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
@@ -6105,6 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6105static int efx_ef10_mtd_probe(struct efx_nic *efx) 6115static int efx_ef10_mtd_probe(struct efx_nic *efx)
6106{ 6116{
6107 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); 6117 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
6118 DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
6108 struct efx_mcdi_mtd_partition *parts; 6119 struct efx_mcdi_mtd_partition *parts;
6109 size_t outlen, n_parts_total, i, n_parts; 6120 size_t outlen, n_parts_total, i, n_parts;
6110 unsigned int type; 6121 unsigned int type;
@@ -6133,11 +6144,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
6133 for (i = 0; i < n_parts_total; i++) { 6144 for (i = 0; i < n_parts_total; i++) {
6134 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, 6145 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
6135 i); 6146 i);
6136 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); 6147 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
6137 if (rc == 0) 6148 found);
6138 n_parts++; 6149 if (rc == -EEXIST || rc == -ENODEV)
6139 else if (rc != -ENODEV) 6150 continue;
6151 if (rc)
6140 goto fail; 6152 goto fail;
6153 n_parts++;
6141 } 6154 }
6142 6155
6143 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); 6156 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index a8ecb33390da..9c07b5175581 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -33,8 +33,8 @@
33int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer, 33int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
34 unsigned int len, gfp_t gfp_flags) 34 unsigned int len, gfp_t gfp_flags)
35{ 35{
36 buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, 36 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
37 &buffer->dma_addr, gfp_flags); 37 &buffer->dma_addr, gfp_flags);
38 if (!buffer->addr) 38 if (!buffer->addr)
39 return -ENOMEM; 39 return -ENOMEM;
40 buffer->len = len; 40 buffer->len = len;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index aa1945a858d5..c2d45a40eb48 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -34,8 +34,8 @@
34int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 34int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
35 unsigned int len, gfp_t gfp_flags) 35 unsigned int len, gfp_t gfp_flags)
36{ 36{
37 buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, 37 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
38 &buffer->dma_addr, gfp_flags); 38 &buffer->dma_addr, gfp_flags);
39 if (!buffer->addr) 39 if (!buffer->addr)
40 return -ENOMEM; 40 return -ENOMEM;
41 buffer->len = len; 41 buffer->len = len;
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 703fbbefea44..0e1b7e960b98 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -211,8 +211,8 @@ static void meth_check_link(struct net_device *dev)
211static int meth_init_tx_ring(struct meth_private *priv) 211static int meth_init_tx_ring(struct meth_private *priv)
212{ 212{
213 /* Init TX ring */ 213 /* Init TX ring */
214 priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE, 214 priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
215 &priv->tx_ring_dma, GFP_ATOMIC); 215 &priv->tx_ring_dma, GFP_ATOMIC);
216 if (!priv->tx_ring) 216 if (!priv->tx_ring)
217 return -ENOMEM; 217 return -ENOMEM;
218 218
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 15c62c160953..be47d864f8b9 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
1037 skb = ep->tx_skbuff[entry]; 1037 skb = ep->tx_skbuff[entry];
1038 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, 1038 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1039 skb->len, PCI_DMA_TODEVICE); 1039 skb->len, PCI_DMA_TODEVICE);
1040 dev_kfree_skb_irq(skb); 1040 dev_consume_skb_irq(skb);
1041 ep->tx_skbuff[entry] = NULL; 1041 ep->tx_skbuff[entry] = NULL;
1042 } 1042 }
1043 1043
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 05a0948ad929..a18149720aa2 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1029,8 +1029,8 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
1029 struct netsec_desc_ring *dring = &priv->desc_ring[id]; 1029 struct netsec_desc_ring *dring = &priv->desc_ring[id];
1030 int i; 1030 int i;
1031 1031
1032 dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, 1032 dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
1033 &dring->desc_dma, GFP_KERNEL); 1033 &dring->desc_dma, GFP_KERNEL);
1034 if (!dring->vaddr) 1034 if (!dring->vaddr)
1035 goto err; 1035 goto err;
1036 1036
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7b923362ee55..3b174eae77c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
1342 } 1342 }
1343 1343
1344 ret = phy_power_on(bsp_priv, true); 1344 ret = phy_power_on(bsp_priv, true);
1345 if (ret) 1345 if (ret) {
1346 gmac_clk_enable(bsp_priv, false);
1346 return ret; 1347 return ret;
1348 }
1347 1349
1348 pm_runtime_enable(dev); 1350 pm_runtime_enable(dev);
1349 pm_runtime_get_sync(dev); 1351 pm_runtime_get_sync(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 20299f6f65fc..736e29635b77 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
241static int dwmac4_rx_check_timestamp(void *desc) 241static int dwmac4_rx_check_timestamp(void *desc)
242{ 242{
243 struct dma_desc *p = (struct dma_desc *)desc; 243 struct dma_desc *p = (struct dma_desc *)desc;
244 unsigned int rdes0 = le32_to_cpu(p->des0);
245 unsigned int rdes1 = le32_to_cpu(p->des1);
246 unsigned int rdes3 = le32_to_cpu(p->des3);
244 u32 own, ctxt; 247 u32 own, ctxt;
245 int ret = 1; 248 int ret = 1;
246 249
247 own = p->des3 & RDES3_OWN; 250 own = rdes3 & RDES3_OWN;
248 ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) 251 ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
249 >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); 252 >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
250 253
251 if (likely(!own && ctxt)) { 254 if (likely(!own && ctxt)) {
252 if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) 255 if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
253 /* Corrupted value */ 256 /* Corrupted value */
254 ret = -EINVAL; 257 ret = -EINVAL;
255 else 258 else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 6c5092e7771c..c5e25580a43f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
263 struct stmmac_extra_stats *x, u32 chan) 263 struct stmmac_extra_stats *x, u32 chan)
264{ 264{
265 u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); 265 u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
266 u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
266 int ret = 0; 267 int ret = 0;
267 268
268 /* ABNORMAL interrupts */ 269 /* ABNORMAL interrupts */
@@ -282,8 +283,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
282 x->normal_irq_n++; 283 x->normal_irq_n++;
283 284
284 if (likely(intr_status & XGMAC_RI)) { 285 if (likely(intr_status & XGMAC_RI)) {
285 u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); 286 if (likely(intr_en & XGMAC_RIE)) {
286 if (likely(value & XGMAC_RIE)) {
287 x->rx_normal_irq_n++; 287 x->rx_normal_irq_n++;
288 ret |= handle_rx; 288 ret |= handle_rx;
289 } 289 }
@@ -295,7 +295,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
295 } 295 }
296 296
297 /* Clear interrupts */ 297 /* Clear interrupts */
298 writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan)); 298 writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
299 299
300 return ret; 300 return ret;
301} 301}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d1f61c25d82b..3c749c327cbd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -696,33 +696,38 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
696 struct ethtool_eee *edata) 696 struct ethtool_eee *edata)
697{ 697{
698 struct stmmac_priv *priv = netdev_priv(dev); 698 struct stmmac_priv *priv = netdev_priv(dev);
699 int ret;
699 700
700 priv->eee_enabled = edata->eee_enabled; 701 if (!edata->eee_enabled) {
701
702 if (!priv->eee_enabled)
703 stmmac_disable_eee_mode(priv); 702 stmmac_disable_eee_mode(priv);
704 else { 703 } else {
705 /* We are asking for enabling the EEE but it is safe 704 /* We are asking for enabling the EEE but it is safe
706 * to verify all by invoking the eee_init function. 705 * to verify all by invoking the eee_init function.
707 * In case of failure it will return an error. 706 * In case of failure it will return an error.
708 */ 707 */
709 priv->eee_enabled = stmmac_eee_init(priv); 708 edata->eee_enabled = stmmac_eee_init(priv);
710 if (!priv->eee_enabled) 709 if (!edata->eee_enabled)
711 return -EOPNOTSUPP; 710 return -EOPNOTSUPP;
712
713 /* Do not change tx_lpi_timer in case of failure */
714 priv->tx_lpi_timer = edata->tx_lpi_timer;
715 } 711 }
716 712
717 return phy_ethtool_set_eee(dev->phydev, edata); 713 ret = phy_ethtool_set_eee(dev->phydev, edata);
714 if (ret)
715 return ret;
716
717 priv->eee_enabled = edata->eee_enabled;
718 priv->tx_lpi_timer = edata->tx_lpi_timer;
719 return 0;
718} 720}
719 721
720static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) 722static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
721{ 723{
722 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); 724 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
723 725
724 if (!clk) 726 if (!clk) {
725 return 0; 727 clk = priv->plat->clk_ref_rate;
728 if (!clk)
729 return 0;
730 }
726 731
727 return (usec * (clk / 1000000)) / 256; 732 return (usec * (clk / 1000000)) / 256;
728} 733}
@@ -731,8 +736,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
731{ 736{
732 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); 737 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
733 738
734 if (!clk) 739 if (!clk) {
735 return 0; 740 clk = priv->plat->clk_ref_rate;
741 if (!clk)
742 return 0;
743 }
736 744
737 return (riwt * 256) / (clk / 1000000); 745 return (riwt * 256) / (clk / 1000000);
738} 746}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0e0a0789c2ed..685d20472358 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1549,22 +1549,18 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1549 goto err_dma; 1549 goto err_dma;
1550 1550
1551 if (priv->extend_desc) { 1551 if (priv->extend_desc) {
1552 rx_q->dma_erx = dma_zalloc_coherent(priv->device, 1552 rx_q->dma_erx = dma_alloc_coherent(priv->device,
1553 DMA_RX_SIZE * 1553 DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1554 sizeof(struct 1554 &rx_q->dma_rx_phy,
1555 dma_extended_desc), 1555 GFP_KERNEL);
1556 &rx_q->dma_rx_phy,
1557 GFP_KERNEL);
1558 if (!rx_q->dma_erx) 1556 if (!rx_q->dma_erx)
1559 goto err_dma; 1557 goto err_dma;
1560 1558
1561 } else { 1559 } else {
1562 rx_q->dma_rx = dma_zalloc_coherent(priv->device, 1560 rx_q->dma_rx = dma_alloc_coherent(priv->device,
1563 DMA_RX_SIZE * 1561 DMA_RX_SIZE * sizeof(struct dma_desc),
1564 sizeof(struct 1562 &rx_q->dma_rx_phy,
1565 dma_desc), 1563 GFP_KERNEL);
1566 &rx_q->dma_rx_phy,
1567 GFP_KERNEL);
1568 if (!rx_q->dma_rx) 1564 if (!rx_q->dma_rx)
1569 goto err_dma; 1565 goto err_dma;
1570 } 1566 }
@@ -1612,21 +1608,17 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1612 goto err_dma; 1608 goto err_dma;
1613 1609
1614 if (priv->extend_desc) { 1610 if (priv->extend_desc) {
1615 tx_q->dma_etx = dma_zalloc_coherent(priv->device, 1611 tx_q->dma_etx = dma_alloc_coherent(priv->device,
1616 DMA_TX_SIZE * 1612 DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1617 sizeof(struct 1613 &tx_q->dma_tx_phy,
1618 dma_extended_desc), 1614 GFP_KERNEL);
1619 &tx_q->dma_tx_phy,
1620 GFP_KERNEL);
1621 if (!tx_q->dma_etx) 1615 if (!tx_q->dma_etx)
1622 goto err_dma; 1616 goto err_dma;
1623 } else { 1617 } else {
1624 tx_q->dma_tx = dma_zalloc_coherent(priv->device, 1618 tx_q->dma_tx = dma_alloc_coherent(priv->device,
1625 DMA_TX_SIZE * 1619 DMA_TX_SIZE * sizeof(struct dma_desc),
1626 sizeof(struct 1620 &tx_q->dma_tx_phy,
1627 dma_desc), 1621 GFP_KERNEL);
1628 &tx_q->dma_tx_phy,
1629 GFP_KERNEL);
1630 if (!tx_q->dma_tx) 1622 if (!tx_q->dma_tx)
1631 goto err_dma; 1623 goto err_dma;
1632 } 1624 }
@@ -3031,10 +3023,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3031 3023
3032 tx_q = &priv->tx_queue[queue]; 3024 tx_q = &priv->tx_queue[queue];
3033 3025
3026 if (priv->tx_path_in_lpi_mode)
3027 stmmac_disable_eee_mode(priv);
3028
3034 /* Manage oversized TCP frames for GMAC4 device */ 3029 /* Manage oversized TCP frames for GMAC4 device */
3035 if (skb_is_gso(skb) && priv->tso) { 3030 if (skb_is_gso(skb) && priv->tso) {
3036 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 3031 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3032 /*
3033 * There is no way to determine the number of TSO
3034 * capable Queues. Let's use always the Queue 0
3035 * because if TSO is supported then at least this
3036 * one will be capable.
3037 */
3038 skb_set_queue_mapping(skb, 0);
3039
3037 return stmmac_tso_xmit(skb, dev); 3040 return stmmac_tso_xmit(skb, dev);
3041 }
3038 } 3042 }
3039 3043
3040 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 3044 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3049,9 +3053,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3049 return NETDEV_TX_BUSY; 3053 return NETDEV_TX_BUSY;
3050 } 3054 }
3051 3055
3052 if (priv->tx_path_in_lpi_mode)
3053 stmmac_disable_eee_mode(priv);
3054
3055 entry = tx_q->cur_tx; 3056 entry = tx_q->cur_tx;
3056 first_entry = entry; 3057 first_entry = entry;
3057 WARN_ON(tx_q->tx_skbuff[first_entry]); 3058 WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -3525,27 +3526,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3525 struct stmmac_channel *ch = 3526 struct stmmac_channel *ch =
3526 container_of(napi, struct stmmac_channel, napi); 3527 container_of(napi, struct stmmac_channel, napi);
3527 struct stmmac_priv *priv = ch->priv_data; 3528 struct stmmac_priv *priv = ch->priv_data;
3528 int work_done = 0, work_rem = budget; 3529 int work_done, rx_done = 0, tx_done = 0;
3529 u32 chan = ch->index; 3530 u32 chan = ch->index;
3530 3531
3531 priv->xstats.napi_poll++; 3532 priv->xstats.napi_poll++;
3532 3533
3533 if (ch->has_tx) { 3534 if (ch->has_tx)
3534 int done = stmmac_tx_clean(priv, work_rem, chan); 3535 tx_done = stmmac_tx_clean(priv, budget, chan);
3536 if (ch->has_rx)
3537 rx_done = stmmac_rx(priv, budget, chan);
3535 3538
3536 work_done += done; 3539 work_done = max(rx_done, tx_done);
3537 work_rem -= done; 3540 work_done = min(work_done, budget);
3538 }
3539 3541
3540 if (ch->has_rx) { 3542 if (work_done < budget && napi_complete_done(napi, work_done)) {
3541 int done = stmmac_rx(priv, work_rem, chan); 3543 int stat;
3542 3544
3543 work_done += done;
3544 work_rem -= done;
3545 }
3546
3547 if (work_done < budget && napi_complete_done(napi, work_done))
3548 stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3545 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3546 stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3547 &priv->xstats, chan);
3548 if (stat && napi_reschedule(napi))
3549 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
3550 }
3549 3551
3550 return work_done; 3552 return work_done;
3551} 3553}
@@ -4168,6 +4170,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
4168 return ret; 4170 return ret;
4169 } 4171 }
4170 4172
4173 /* Rx Watchdog is available in the COREs newer than the 3.40.
4174 * In some case, for example on bugged HW this feature
4175 * has to be disable and this can be done by passing the
4176 * riwt_off field from the platform.
4177 */
4178 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4179 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4180 priv->use_riwt = 1;
4181 dev_info(priv->device,
4182 "Enable RX Mitigation via HW Watchdog Timer\n");
4183 }
4184
4171 return 0; 4185 return 0;
4172} 4186}
4173 4187
@@ -4300,18 +4314,6 @@ int stmmac_dvr_probe(struct device *device,
4300 if (flow_ctrl) 4314 if (flow_ctrl)
4301 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 4315 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4302 4316
4303 /* Rx Watchdog is available in the COREs newer than the 3.40.
4304 * In some case, for example on bugged HW this feature
4305 * has to be disable and this can be done by passing the
4306 * riwt_off field from the platform.
4307 */
4308 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4309 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4310 priv->use_riwt = 1;
4311 dev_info(priv->device,
4312 "Enable RX Mitigation via HW Watchdog Timer\n");
4313 }
4314
4315 /* Setup channels NAPI */ 4317 /* Setup channels NAPI */
4316 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 4318 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4317 4319
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index c54a50dbd5ac..d819e8eaba12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
299 */ 299 */
300static void stmmac_pci_remove(struct pci_dev *pdev) 300static void stmmac_pci_remove(struct pci_dev *pdev)
301{ 301{
302 int i;
303
302 stmmac_dvr_remove(&pdev->dev); 304 stmmac_dvr_remove(&pdev->dev);
305
306 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
307 if (pci_resource_len(pdev, i) == 0)
308 continue;
309 pcim_iounmap_regions(pdev, BIT(i));
310 break;
311 }
312
303 pci_disable_device(pdev); 313 pci_disable_device(pdev);
304} 314}
305 315
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 531294f4978b..58ea18af9813 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
301 /* Queue 0 is not AVB capable */ 301 /* Queue 0 is not AVB capable */
302 if (queue <= 0 || queue >= tx_queues_count) 302 if (queue <= 0 || queue >= tx_queues_count)
303 return -EINVAL; 303 return -EINVAL;
304 if (!priv->dma_cap.av)
305 return -EOPNOTSUPP;
304 if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) 306 if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
305 return -EOPNOTSUPP; 307 return -EOPNOTSUPP;
306 308
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 9020b084b953..6fc05c106afc 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1,22 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/* cassini.c: Sun Microsystems Cassini(+) ethernet driver. 2/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
3 * 3 *
4 * Copyright (C) 2004 Sun Microsystems Inc. 4 * Copyright (C) 2004 Sun Microsystems Inc.
5 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) 5 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
6 * 6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 *
20 * This driver uses the sungem driver (c) David Miller 7 * This driver uses the sungem driver (c) David Miller
21 * (davem@redhat.com) as its basis. 8 * (davem@redhat.com) as its basis.
22 * 9 *
@@ -1911,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1911 cp->net_stats[ring].tx_packets++; 1898 cp->net_stats[ring].tx_packets++;
1912 cp->net_stats[ring].tx_bytes += skb->len; 1899 cp->net_stats[ring].tx_bytes += skb->len;
1913 spin_unlock(&cp->stat_lock[ring]); 1900 spin_unlock(&cp->stat_lock[ring]);
1914 dev_kfree_skb_irq(skb); 1901 dev_consume_skb_irq(skb);
1915 } 1902 }
1916 cp->tx_old[ring] = entry; 1903 cp->tx_old[ring] = entry;
1917 1904
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index 13f3860496a8..ae5f05f03f88 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0+ */
2/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ 2/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
3 * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. 3 * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
4 * 4 *
5 * Copyright (C) 2004 Sun Microsystems Inc. 5 * Copyright (C) 2004 Sun Microsystems Inc.
6 * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) 6 * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
20 *
21 * vendor id: 0x108E (Sun Microsystems, Inc.) 8 * vendor id: 0x108E (Sun Microsystems, Inc.)
22 * device id: 0xabba (Cassini) 9 * device id: 0xabba (Cassini)
23 * revision ids: 0x01 = Cassini 10 * revision ids: 0x01 = Cassini
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 720b7ac77f3b..e9b757b03b56 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp)
781 781
782 DTX(("skb(%p) ", skb)); 782 DTX(("skb(%p) ", skb));
783 bp->tx_skbs[elem] = NULL; 783 bp->tx_skbs[elem] = NULL;
784 dev_kfree_skb_irq(skb); 784 dev_consume_skb_irq(skb);
785 785
786 elem = NEXT_TX(elem); 786 elem = NEXT_TX(elem);
787 } 787 }
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index ff641cf30a4e..d007dfeba5c3 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp)
1962 this = &txbase[elem]; 1962 this = &txbase[elem];
1963 } 1963 }
1964 1964
1965 dev_kfree_skb_irq(skb); 1965 dev_consume_skb_irq(skb);
1966 dev->stats.tx_packets++; 1966 dev->stats.tx_packets++;
1967 } 1967 }
1968 hp->tx_old = elem; 1968 hp->tx_old = elem;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index dc966ddb6d81..b24c11187017 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
1739 tx_level -= db->rptr->len; /* '-' koz len is negative */ 1739 tx_level -= db->rptr->len; /* '-' koz len is negative */
1740 1740
1741 /* now should come skb pointer - free it */ 1741 /* now should come skb pointer - free it */
1742 dev_kfree_skb_irq(db->rptr->addr.skb); 1742 dev_consume_skb_irq(db->rptr->addr.skb);
1743 bdx_tx_db_inc_rptr(db); 1743 bdx_tx_db_inc_rptr(db);
1744 } 1744 }
1745 1745
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 810dfc7de1f9..e2d47b24a869 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -608,7 +608,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
608 netdev_dbg(dev, "sent 0x%p, len=%d\n", 608 netdev_dbg(dev, "sent 0x%p, len=%d\n",
609 desc->skb, desc->skb->len); 609 desc->skb, desc->skb->len);
610 610
611 dev_kfree_skb_irq(desc->skb); 611 dev_consume_skb_irq(desc->skb);
612 desc->skb = NULL; 612 desc->skb = NULL;
613 if (__netif_subqueue_stopped(dev, queue)) 613 if (__netif_subqueue_stopped(dev, queue))
614 netif_wake_subqueue(dev, queue); 614 netif_wake_subqueue(dev, queue);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1f612268c998..d847f672a705 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
259 const char *name; 259 const char *name;
260 char node_name[32]; 260 char node_name[32];
261 261
262 if (of_property_read_string(node, "label", &name) < 0) { 262 if (of_property_read_string(child, "label", &name) < 0) {
263 snprintf(node_name, sizeof(node_name), "%pOFn", child); 263 snprintf(node_name, sizeof(node_name), "%pOFn", child);
264 name = node_name; 264 name = node_name;
265 } 265 }
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index edcd1e60b30d..37925a1d58de 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1311,13 +1311,13 @@ static int tsi108_open(struct net_device *dev)
1311 data->id, dev->irq, dev->name); 1311 data->id, dev->irq, dev->name);
1312 } 1312 }
1313 1313
1314 data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size, 1314 data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
1315 &data->rxdma, GFP_KERNEL); 1315 &data->rxdma, GFP_KERNEL);
1316 if (!data->rxring) 1316 if (!data->rxring)
1317 return -ENOMEM; 1317 return -ENOMEM;
1318 1318
1319 data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size, 1319 data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
1320 &data->txdma, GFP_KERNEL); 1320 &data->txdma, GFP_KERNEL);
1321 if (!data->txring) { 1321 if (!data->txring) {
1322 dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, 1322 dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
1323 data->rxdma); 1323 data->rxdma);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 82412691ee66..27f6cf140845 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
1740 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], 1740 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1741 le16_to_cpu(pktlen), DMA_TO_DEVICE); 1741 le16_to_cpu(pktlen), DMA_TO_DEVICE);
1742 } 1742 }
1743 dev_kfree_skb_irq(skb); 1743 dev_consume_skb_irq(skb);
1744 tdinfo->skb = NULL; 1744 tdinfo->skb = NULL;
1745} 1745}
1746 1746
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2241f9897092..15bb058db392 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
243 243
244 /* allocate the tx and rx ring buffer descriptors. */ 244 /* allocate the tx and rx ring buffer descriptors. */
245 /* returns a virtual address and a physical address. */ 245 /* returns a virtual address and a physical address. */
246 lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 246 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
247 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 247 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
248 &lp->tx_bd_p, GFP_KERNEL); 248 &lp->tx_bd_p, GFP_KERNEL);
249 if (!lp->tx_bd_v) 249 if (!lp->tx_bd_v)
250 goto out; 250 goto out;
251 251
252 lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 252 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
253 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 253 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
254 &lp->rx_bd_p, GFP_KERNEL); 254 &lp->rx_bd_p, GFP_KERNEL);
255 if (!lp->rx_bd_v) 255 if (!lp->rx_bd_v)
256 goto out; 256 goto out;
257 257
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 12a14609ec47..0789d8af7d72 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -199,15 +199,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
199 lp->rx_bd_ci = 0; 199 lp->rx_bd_ci = 0;
200 200
201 /* Allocate the Tx and Rx buffer descriptors. */ 201 /* Allocate the Tx and Rx buffer descriptors. */
202 lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 202 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
203 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 203 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
204 &lp->tx_bd_p, GFP_KERNEL); 204 &lp->tx_bd_p, GFP_KERNEL);
205 if (!lp->tx_bd_v) 205 if (!lp->tx_bd_v)
206 goto out; 206 goto out;
207 207
208 lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 208 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
209 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 209 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
210 &lp->rx_bd_p, GFP_KERNEL); 210 &lp->rx_bd_p, GFP_KERNEL);
211 if (!lp->rx_bd_v) 211 if (!lp->rx_bd_v)
212 goto out; 212 goto out;
213 213
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 61fceee73c1b..56b7791911bf 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1139,9 +1139,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
1139#endif 1139#endif
1140 sizeof(PI_CONSUMER_BLOCK) + 1140 sizeof(PI_CONSUMER_BLOCK) +
1141 (PI_ALIGN_K_DESC_BLK - 1); 1141 (PI_ALIGN_K_DESC_BLK - 1);
1142 bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, 1142 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1143 &bp->kmalloced_dma, 1143 &bp->kmalloced_dma,
1144 GFP_ATOMIC); 1144 GFP_ATOMIC);
1145 if (top_v == NULL) 1145 if (top_v == NULL)
1146 return DFX_K_FAILURE; 1146 return DFX_K_FAILURE;
1147 1147
@@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp)
3512 bp->descr_block_virt->xmt_data[comp].long_1, 3512 bp->descr_block_virt->xmt_data[comp].long_1,
3513 p_xmt_drv_descr->p_skb->len, 3513 p_xmt_drv_descr->p_skb->len,
3514 DMA_TO_DEVICE); 3514 DMA_TO_DEVICE);
3515 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); 3515 dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
3516 3516
3517 /* 3517 /*
3518 * Move to start of next packet by updating completion index 3518 * Move to start of next packet by updating completion index
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 72433f3efc74..5d661f60b101 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -409,10 +409,10 @@ static int skfp_driver_init(struct net_device *dev)
409 if (bp->SharedMemSize > 0) { 409 if (bp->SharedMemSize > 0) {
410 bp->SharedMemSize += 16; // for descriptor alignment 410 bp->SharedMemSize += 16; // for descriptor alignment
411 411
412 bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev, 412 bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
413 bp->SharedMemSize, 413 bp->SharedMemSize,
414 &bp->SharedMemDMA, 414 &bp->SharedMemDMA,
415 GFP_ATOMIC); 415 GFP_ATOMIC);
416 if (!bp->SharedMemAddr) { 416 if (!bp->SharedMemAddr) {
417 printk("could not allocate mem for "); 417 printk("could not allocate mem for ");
418 printk("hardware module: %ld byte\n", 418 printk("hardware module: %ld byte\n",
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 58bbba8582b0..3377ac66a347 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev,
1512 } 1512 }
1513#if IS_ENABLED(CONFIG_IPV6) 1513#if IS_ENABLED(CONFIG_IPV6)
1514 case AF_INET6: { 1514 case AF_INET6: {
1515 struct rt6_info *rt = rt6_lookup(geneve->net, 1515 struct rt6_info *rt;
1516 &info->key.u.ipv6.dst, NULL, 0, 1516
1517 NULL, 0); 1517 if (!__in6_dev_get(dev))
1518 break;
1519
1520 rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
1521 NULL, 0);
1518 1522
1519 if (rt && rt->dst.dev) 1523 if (rt && rt->dst.dev)
1520 ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; 1524 ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index ef6f766f6389..e859ae2e42d5 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -144,6 +144,8 @@ struct hv_netvsc_packet {
144 u32 total_data_buflen; 144 u32 total_data_buflen;
145}; 145};
146 146
147#define NETVSC_HASH_KEYLEN 40
148
147struct netvsc_device_info { 149struct netvsc_device_info {
148 unsigned char mac_adr[ETH_ALEN]; 150 unsigned char mac_adr[ETH_ALEN];
149 u32 num_chn; 151 u32 num_chn;
@@ -151,6 +153,8 @@ struct netvsc_device_info {
151 u32 recv_sections; 153 u32 recv_sections;
152 u32 send_section_size; 154 u32 send_section_size;
153 u32 recv_section_size; 155 u32 recv_section_size;
156
157 u8 rss_key[NETVSC_HASH_KEYLEN];
154}; 158};
155 159
156enum rndis_device_state { 160enum rndis_device_state {
@@ -160,8 +164,6 @@ enum rndis_device_state {
160 RNDIS_DEV_DATAINITIALIZED, 164 RNDIS_DEV_DATAINITIALIZED,
161}; 165};
162 166
163#define NETVSC_HASH_KEYLEN 40
164
165struct rndis_device { 167struct rndis_device {
166 struct net_device *ndev; 168 struct net_device *ndev;
167 169
@@ -209,7 +211,9 @@ int netvsc_recv_callback(struct net_device *net,
209void netvsc_channel_cb(void *context); 211void netvsc_channel_cb(void *context);
210int netvsc_poll(struct napi_struct *napi, int budget); 212int netvsc_poll(struct napi_struct *napi, int budget);
211 213
212int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); 214int rndis_set_subchannel(struct net_device *ndev,
215 struct netvsc_device *nvdev,
216 struct netvsc_device_info *dev_info);
213int rndis_filter_open(struct netvsc_device *nvdev); 217int rndis_filter_open(struct netvsc_device *nvdev);
214int rndis_filter_close(struct netvsc_device *nvdev); 218int rndis_filter_close(struct netvsc_device *nvdev);
215struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, 219struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
@@ -1177,7 +1181,7 @@ enum ndis_per_pkt_info_type {
1177 1181
1178enum rndis_per_pkt_info_interal_type { 1182enum rndis_per_pkt_info_interal_type {
1179 RNDIS_PKTINFO_ID = 1, 1183 RNDIS_PKTINFO_ID = 1,
1180 /* Add more memebers here */ 1184 /* Add more members here */
1181 1185
1182 RNDIS_PKTINFO_MAX 1186 RNDIS_PKTINFO_MAX
1183}; 1187};
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 922054c1d544..813d195bbd57 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w)
84 84
85 rdev = nvdev->extension; 85 rdev = nvdev->extension;
86 if (rdev) { 86 if (rdev) {
87 ret = rndis_set_subchannel(rdev->ndev, nvdev); 87 ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
88 if (ret == 0) { 88 if (ret == 0) {
89 netif_device_attach(rdev->ndev); 89 netif_device_attach(rdev->ndev);
90 } else { 90 } else {
@@ -1331,7 +1331,7 @@ void netvsc_channel_cb(void *context)
1331 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); 1331 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1332 1332
1333 if (napi_schedule_prep(&nvchan->napi)) { 1333 if (napi_schedule_prep(&nvchan->napi)) {
1334 /* disable interupts from host */ 1334 /* disable interrupts from host */
1335 hv_begin_read(rbi); 1335 hv_begin_read(rbi);
1336 1336
1337 __napi_schedule_irqoff(&nvchan->napi); 1337 __napi_schedule_irqoff(&nvchan->napi);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 91ed15ea5883..256adbd044f5 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -370,7 +370,7 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
370{ 370{
371 int j = 0; 371 int j = 0;
372 372
373 /* Deal with compund pages by ignoring unused part 373 /* Deal with compound pages by ignoring unused part
374 * of the page. 374 * of the page.
375 */ 375 */
376 page += (offset >> PAGE_SHIFT); 376 page += (offset >> PAGE_SHIFT);
@@ -858,6 +858,39 @@ static void netvsc_get_channels(struct net_device *net,
858 } 858 }
859} 859}
860 860
861/* Alloc struct netvsc_device_info, and initialize it from either existing
862 * struct netvsc_device, or from default values.
863 */
864static struct netvsc_device_info *netvsc_devinfo_get
865 (struct netvsc_device *nvdev)
866{
867 struct netvsc_device_info *dev_info;
868
869 dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
870
871 if (!dev_info)
872 return NULL;
873
874 if (nvdev) {
875 dev_info->num_chn = nvdev->num_chn;
876 dev_info->send_sections = nvdev->send_section_cnt;
877 dev_info->send_section_size = nvdev->send_section_size;
878 dev_info->recv_sections = nvdev->recv_section_cnt;
879 dev_info->recv_section_size = nvdev->recv_section_size;
880
881 memcpy(dev_info->rss_key, nvdev->extension->rss_key,
882 NETVSC_HASH_KEYLEN);
883 } else {
884 dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
885 dev_info->send_sections = NETVSC_DEFAULT_TX;
886 dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
887 dev_info->recv_sections = NETVSC_DEFAULT_RX;
888 dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
889 }
890
891 return dev_info;
892}
893
861static int netvsc_detach(struct net_device *ndev, 894static int netvsc_detach(struct net_device *ndev,
862 struct netvsc_device *nvdev) 895 struct netvsc_device *nvdev)
863{ 896{
@@ -909,7 +942,7 @@ static int netvsc_attach(struct net_device *ndev,
909 return PTR_ERR(nvdev); 942 return PTR_ERR(nvdev);
910 943
911 if (nvdev->num_chn > 1) { 944 if (nvdev->num_chn > 1) {
912 ret = rndis_set_subchannel(ndev, nvdev); 945 ret = rndis_set_subchannel(ndev, nvdev, dev_info);
913 946
914 /* if unavailable, just proceed with one queue */ 947 /* if unavailable, just proceed with one queue */
915 if (ret) { 948 if (ret) {
@@ -943,7 +976,7 @@ static int netvsc_set_channels(struct net_device *net,
943 struct net_device_context *net_device_ctx = netdev_priv(net); 976 struct net_device_context *net_device_ctx = netdev_priv(net);
944 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 977 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
945 unsigned int orig, count = channels->combined_count; 978 unsigned int orig, count = channels->combined_count;
946 struct netvsc_device_info device_info; 979 struct netvsc_device_info *device_info;
947 int ret; 980 int ret;
948 981
949 /* We do not support separate count for rx, tx, or other */ 982 /* We do not support separate count for rx, tx, or other */
@@ -962,24 +995,26 @@ static int netvsc_set_channels(struct net_device *net,
962 995
963 orig = nvdev->num_chn; 996 orig = nvdev->num_chn;
964 997
965 memset(&device_info, 0, sizeof(device_info)); 998 device_info = netvsc_devinfo_get(nvdev);
966 device_info.num_chn = count; 999
967 device_info.send_sections = nvdev->send_section_cnt; 1000 if (!device_info)
968 device_info.send_section_size = nvdev->send_section_size; 1001 return -ENOMEM;
969 device_info.recv_sections = nvdev->recv_section_cnt; 1002
970 device_info.recv_section_size = nvdev->recv_section_size; 1003 device_info->num_chn = count;
971 1004
972 ret = netvsc_detach(net, nvdev); 1005 ret = netvsc_detach(net, nvdev);
973 if (ret) 1006 if (ret)
974 return ret; 1007 goto out;
975 1008
976 ret = netvsc_attach(net, &device_info); 1009 ret = netvsc_attach(net, device_info);
977 if (ret) { 1010 if (ret) {
978 device_info.num_chn = orig; 1011 device_info->num_chn = orig;
979 if (netvsc_attach(net, &device_info)) 1012 if (netvsc_attach(net, device_info))
980 netdev_err(net, "restoring channel setting failed\n"); 1013 netdev_err(net, "restoring channel setting failed\n");
981 } 1014 }
982 1015
1016out:
1017 kfree(device_info);
983 return ret; 1018 return ret;
984} 1019}
985 1020
@@ -1048,48 +1083,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
1048 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1083 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
1049 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1084 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1050 int orig_mtu = ndev->mtu; 1085 int orig_mtu = ndev->mtu;
1051 struct netvsc_device_info device_info; 1086 struct netvsc_device_info *device_info;
1052 int ret = 0; 1087 int ret = 0;
1053 1088
1054 if (!nvdev || nvdev->destroy) 1089 if (!nvdev || nvdev->destroy)
1055 return -ENODEV; 1090 return -ENODEV;
1056 1091
1092 device_info = netvsc_devinfo_get(nvdev);
1093
1094 if (!device_info)
1095 return -ENOMEM;
1096
1057 /* Change MTU of underlying VF netdev first. */ 1097 /* Change MTU of underlying VF netdev first. */
1058 if (vf_netdev) { 1098 if (vf_netdev) {
1059 ret = dev_set_mtu(vf_netdev, mtu); 1099 ret = dev_set_mtu(vf_netdev, mtu);
1060 if (ret) 1100 if (ret)
1061 return ret; 1101 goto out;
1062 } 1102 }
1063 1103
1064 memset(&device_info, 0, sizeof(device_info));
1065 device_info.num_chn = nvdev->num_chn;
1066 device_info.send_sections = nvdev->send_section_cnt;
1067 device_info.send_section_size = nvdev->send_section_size;
1068 device_info.recv_sections = nvdev->recv_section_cnt;
1069 device_info.recv_section_size = nvdev->recv_section_size;
1070
1071 ret = netvsc_detach(ndev, nvdev); 1104 ret = netvsc_detach(ndev, nvdev);
1072 if (ret) 1105 if (ret)
1073 goto rollback_vf; 1106 goto rollback_vf;
1074 1107
1075 ndev->mtu = mtu; 1108 ndev->mtu = mtu;
1076 1109
1077 ret = netvsc_attach(ndev, &device_info); 1110 ret = netvsc_attach(ndev, device_info);
1078 if (ret) 1111 if (!ret)
1079 goto rollback; 1112 goto out;
1080
1081 return 0;
1082 1113
1083rollback:
1084 /* Attempt rollback to original MTU */ 1114 /* Attempt rollback to original MTU */
1085 ndev->mtu = orig_mtu; 1115 ndev->mtu = orig_mtu;
1086 1116
1087 if (netvsc_attach(ndev, &device_info)) 1117 if (netvsc_attach(ndev, device_info))
1088 netdev_err(ndev, "restoring mtu failed\n"); 1118 netdev_err(ndev, "restoring mtu failed\n");
1089rollback_vf: 1119rollback_vf:
1090 if (vf_netdev) 1120 if (vf_netdev)
1091 dev_set_mtu(vf_netdev, orig_mtu); 1121 dev_set_mtu(vf_netdev, orig_mtu);
1092 1122
1123out:
1124 kfree(device_info);
1093 return ret; 1125 return ret;
1094} 1126}
1095 1127
@@ -1674,7 +1706,7 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1674{ 1706{
1675 struct net_device_context *ndevctx = netdev_priv(ndev); 1707 struct net_device_context *ndevctx = netdev_priv(ndev);
1676 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1708 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1677 struct netvsc_device_info device_info; 1709 struct netvsc_device_info *device_info;
1678 struct ethtool_ringparam orig; 1710 struct ethtool_ringparam orig;
1679 u32 new_tx, new_rx; 1711 u32 new_tx, new_rx;
1680 int ret = 0; 1712 int ret = 0;
@@ -1694,26 +1726,29 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1694 new_rx == orig.rx_pending) 1726 new_rx == orig.rx_pending)
1695 return 0; /* no change */ 1727 return 0; /* no change */
1696 1728
1697 memset(&device_info, 0, sizeof(device_info)); 1729 device_info = netvsc_devinfo_get(nvdev);
1698 device_info.num_chn = nvdev->num_chn; 1730
1699 device_info.send_sections = new_tx; 1731 if (!device_info)
1700 device_info.send_section_size = nvdev->send_section_size; 1732 return -ENOMEM;
1701 device_info.recv_sections = new_rx; 1733
1702 device_info.recv_section_size = nvdev->recv_section_size; 1734 device_info->send_sections = new_tx;
1735 device_info->recv_sections = new_rx;
1703 1736
1704 ret = netvsc_detach(ndev, nvdev); 1737 ret = netvsc_detach(ndev, nvdev);
1705 if (ret) 1738 if (ret)
1706 return ret; 1739 goto out;
1707 1740
1708 ret = netvsc_attach(ndev, &device_info); 1741 ret = netvsc_attach(ndev, device_info);
1709 if (ret) { 1742 if (ret) {
1710 device_info.send_sections = orig.tx_pending; 1743 device_info->send_sections = orig.tx_pending;
1711 device_info.recv_sections = orig.rx_pending; 1744 device_info->recv_sections = orig.rx_pending;
1712 1745
1713 if (netvsc_attach(ndev, &device_info)) 1746 if (netvsc_attach(ndev, device_info))
1714 netdev_err(ndev, "restoring ringparam failed"); 1747 netdev_err(ndev, "restoring ringparam failed");
1715 } 1748 }
1716 1749
1750out:
1751 kfree(device_info);
1717 return ret; 1752 return ret;
1718} 1753}
1719 1754
@@ -2088,7 +2123,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
2088 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) 2123 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
2089 return NOTIFY_DONE; 2124 return NOTIFY_DONE;
2090 2125
2091 /* if syntihetic interface is a different namespace, 2126 /* if synthetic interface is a different namespace,
2092 * then move the VF to that namespace; join will be 2127 * then move the VF to that namespace; join will be
2093 * done again in that context. 2128 * done again in that context.
2094 */ 2129 */
@@ -2167,7 +2202,7 @@ static int netvsc_probe(struct hv_device *dev,
2167{ 2202{
2168 struct net_device *net = NULL; 2203 struct net_device *net = NULL;
2169 struct net_device_context *net_device_ctx; 2204 struct net_device_context *net_device_ctx;
2170 struct netvsc_device_info device_info; 2205 struct netvsc_device_info *device_info = NULL;
2171 struct netvsc_device *nvdev; 2206 struct netvsc_device *nvdev;
2172 int ret = -ENOMEM; 2207 int ret = -ENOMEM;
2173 2208
@@ -2214,21 +2249,21 @@ static int netvsc_probe(struct hv_device *dev,
2214 netif_set_real_num_rx_queues(net, 1); 2249 netif_set_real_num_rx_queues(net, 1);
2215 2250
2216 /* Notify the netvsc driver of the new device */ 2251 /* Notify the netvsc driver of the new device */
2217 memset(&device_info, 0, sizeof(device_info)); 2252 device_info = netvsc_devinfo_get(NULL);
2218 device_info.num_chn = VRSS_CHANNEL_DEFAULT; 2253
2219 device_info.send_sections = NETVSC_DEFAULT_TX; 2254 if (!device_info) {
2220 device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; 2255 ret = -ENOMEM;
2221 device_info.recv_sections = NETVSC_DEFAULT_RX; 2256 goto devinfo_failed;
2222 device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; 2257 }
2223 2258
2224 nvdev = rndis_filter_device_add(dev, &device_info); 2259 nvdev = rndis_filter_device_add(dev, device_info);
2225 if (IS_ERR(nvdev)) { 2260 if (IS_ERR(nvdev)) {
2226 ret = PTR_ERR(nvdev); 2261 ret = PTR_ERR(nvdev);
2227 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 2262 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
2228 goto rndis_failed; 2263 goto rndis_failed;
2229 } 2264 }
2230 2265
2231 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 2266 memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
2232 2267
2233 /* We must get rtnl lock before scheduling nvdev->subchan_work, 2268 /* We must get rtnl lock before scheduling nvdev->subchan_work,
2234 * otherwise netvsc_subchan_work() can get rtnl lock first and wait 2269 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2236,7 +2271,7 @@ static int netvsc_probe(struct hv_device *dev,
2236 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() 2271 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2237 * -> ... -> device_add() -> ... -> __device_attach() can't get 2272 * -> ... -> device_add() -> ... -> __device_attach() can't get
2238 * the device lock, so all the subchannels can't be processed -- 2273 * the device lock, so all the subchannels can't be processed --
2239 * finally netvsc_subchan_work() hangs for ever. 2274 * finally netvsc_subchan_work() hangs forever.
2240 */ 2275 */
2241 rtnl_lock(); 2276 rtnl_lock();
2242 2277
@@ -2266,12 +2301,16 @@ static int netvsc_probe(struct hv_device *dev,
2266 2301
2267 list_add(&net_device_ctx->list, &netvsc_dev_list); 2302 list_add(&net_device_ctx->list, &netvsc_dev_list);
2268 rtnl_unlock(); 2303 rtnl_unlock();
2304
2305 kfree(device_info);
2269 return 0; 2306 return 0;
2270 2307
2271register_failed: 2308register_failed:
2272 rtnl_unlock(); 2309 rtnl_unlock();
2273 rndis_filter_device_remove(dev, nvdev); 2310 rndis_filter_device_remove(dev, nvdev);
2274rndis_failed: 2311rndis_failed:
2312 kfree(device_info);
2313devinfo_failed:
2275 free_percpu(net_device_ctx->vf_stats); 2314 free_percpu(net_device_ctx->vf_stats);
2276no_stats: 2315no_stats:
2277 hv_set_drvdata(dev, NULL); 2316 hv_set_drvdata(dev, NULL);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8b537a049c1e..73b60592de06 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -774,8 +774,8 @@ cleanup:
774 return ret; 774 return ret;
775} 775}
776 776
777int rndis_filter_set_rss_param(struct rndis_device *rdev, 777static int rndis_set_rss_param_msg(struct rndis_device *rdev,
778 const u8 *rss_key) 778 const u8 *rss_key, u16 flag)
779{ 779{
780 struct net_device *ndev = rdev->ndev; 780 struct net_device *ndev = rdev->ndev;
781 struct rndis_request *request; 781 struct rndis_request *request;
@@ -804,7 +804,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
804 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; 804 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
805 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; 805 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
806 rssp->hdr.size = sizeof(struct ndis_recv_scale_param); 806 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
807 rssp->flag = 0; 807 rssp->flag = flag;
808 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | 808 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
809 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | 809 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
810 NDIS_HASH_TCP_IPV6; 810 NDIS_HASH_TCP_IPV6;
@@ -829,9 +829,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
829 829
830 wait_for_completion(&request->wait_event); 830 wait_for_completion(&request->wait_event);
831 set_complete = &request->response_msg.msg.set_complete; 831 set_complete = &request->response_msg.msg.set_complete;
832 if (set_complete->status == RNDIS_STATUS_SUCCESS) 832 if (set_complete->status == RNDIS_STATUS_SUCCESS) {
833 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); 833 if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
834 else { 834 !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
835 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
836
837 } else {
835 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", 838 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
836 set_complete->status); 839 set_complete->status);
837 ret = -EINVAL; 840 ret = -EINVAL;
@@ -842,6 +845,16 @@ cleanup:
842 return ret; 845 return ret;
843} 846}
844 847
848int rndis_filter_set_rss_param(struct rndis_device *rdev,
849 const u8 *rss_key)
850{
851 /* Disable RSS before change */
852 rndis_set_rss_param_msg(rdev, rss_key,
853 NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
854
855 return rndis_set_rss_param_msg(rdev, rss_key, 0);
856}
857
845static int rndis_filter_query_device_link_status(struct rndis_device *dev, 858static int rndis_filter_query_device_link_status(struct rndis_device *dev,
846 struct netvsc_device *net_device) 859 struct netvsc_device *net_device)
847{ 860{
@@ -1121,7 +1134,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
1121 * This breaks overlap of processing the host message for the 1134 * This breaks overlap of processing the host message for the
1122 * new primary channel with the initialization of sub-channels. 1135 * new primary channel with the initialization of sub-channels.
1123 */ 1136 */
1124int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) 1137int rndis_set_subchannel(struct net_device *ndev,
1138 struct netvsc_device *nvdev,
1139 struct netvsc_device_info *dev_info)
1125{ 1140{
1126 struct nvsp_message *init_packet = &nvdev->channel_init_pkt; 1141 struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1127 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1142 struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1161,8 +1176,11 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
1161 wait_event(nvdev->subchan_open, 1176 wait_event(nvdev->subchan_open,
1162 atomic_read(&nvdev->open_chn) == nvdev->num_chn); 1177 atomic_read(&nvdev->open_chn) == nvdev->num_chn);
1163 1178
1164 /* ignore failues from setting rss parameters, still have channels */ 1179 /* ignore failures from setting rss parameters, still have channels */
1165 rndis_filter_set_rss_param(rdev, netvsc_hash_key); 1180 if (dev_info)
1181 rndis_filter_set_rss_param(rdev, dev_info->rss_key);
1182 else
1183 rndis_filter_set_rss_param(rdev, netvsc_hash_key);
1166 1184
1167 netif_set_real_num_tx_queues(ndev, nvdev->num_chn); 1185 netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
1168 netif_set_real_num_rx_queues(ndev, nvdev->num_chn); 1186 netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 44de81e5f140..c589f5ae75bb 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context)
905 } 905 }
906 break; 906 break;
907 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): 907 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
908 /* rx is starting */ 908 /* rx is starting */
909 dev_dbg(printdev(lp), "RX is starting\n"); 909 dev_dbg(printdev(lp), "RX is starting\n");
910 mcr20a_handle_rx(lp); 910 mcr20a_handle_rx(lp);
911 break; 911 break;
912 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): 912 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
913 if (lp->is_tx) { 913 if (lp->is_tx) {
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 19bdde60680c..07e41c42bcf5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -100,12 +100,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
100 err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); 100 err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
101 if (!err) { 101 if (!err) {
102 mdev->l3mdev_ops = &ipvl_l3mdev_ops; 102 mdev->l3mdev_ops = &ipvl_l3mdev_ops;
103 mdev->priv_flags |= IFF_L3MDEV_MASTER; 103 mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
104 } else 104 } else
105 goto fail; 105 goto fail;
106 } else if (port->mode == IPVLAN_MODE_L3S) { 106 } else if (port->mode == IPVLAN_MODE_L3S) {
107 /* Old mode was L3S */ 107 /* Old mode was L3S */
108 mdev->priv_flags &= ~IFF_L3MDEV_MASTER; 108 mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
109 ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); 109 ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
110 mdev->l3mdev_ops = NULL; 110 mdev->l3mdev_ops = NULL;
111 } 111 }
@@ -167,7 +167,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
167 struct sk_buff *skb; 167 struct sk_buff *skb;
168 168
169 if (port->mode == IPVLAN_MODE_L3S) { 169 if (port->mode == IPVLAN_MODE_L3S) {
170 dev->priv_flags &= ~IFF_L3MDEV_MASTER; 170 dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
171 ipvlan_unregister_nf_hook(dev_net(dev)); 171 ipvlan_unregister_nf_hook(dev_net(dev));
172 dev->l3mdev_ops = NULL; 172 dev->l3mdev_ops = NULL;
173 } 173 }
@@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev,
499 499
500 if (!data) 500 if (!data)
501 return 0; 501 return 0;
502 if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
503 return -EPERM;
502 504
503 if (data[IFLA_IPVLAN_MODE]) { 505 if (data[IFLA_IPVLAN_MODE]) {
504 u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); 506 u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
601 struct ipvl_dev *tmp = netdev_priv(phy_dev); 603 struct ipvl_dev *tmp = netdev_priv(phy_dev);
602 604
603 phy_dev = tmp->phy_dev; 605 phy_dev = tmp->phy_dev;
606 if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
607 return -EPERM;
604 } else if (!netif_is_ipvlan_port(phy_dev)) { 608 } else if (!netif_is_ipvlan_port(phy_dev)) {
605 /* Exit early if the underlying link is invalid or busy */ 609 /* Exit early if the underlying link is invalid or busy */
606 if (phy_dev->type != ARPHRD_ETHER || 610 if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fc726ce4c164..6d067176320f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -337,7 +337,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
337 337
338 if (src) 338 if (src)
339 dev_put(src->dev); 339 dev_put(src->dev);
340 kfree_skb(skb); 340 consume_skb(skb);
341 } 341 }
342} 342}
343 343
diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c
index 8ebe7f5484ae..f14ba5366b91 100644
--- a/drivers/net/phy/asix.c
+++ b/drivers/net/phy/asix.c
@@ -1,13 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/* Driver for Asix PHYs 2/* Driver for Asix PHYs
3 * 3 *
4 * Author: Michael Schmitz <schmitzmic@gmail.com> 4 * Author: Michael Schmitz <schmitzmic@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */ 5 */
12#include <linux/kernel.h> 6#include <linux/kernel.h>
13#include <linux/errno.h> 7#include <linux/errno.h>
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 1b350183bffb..a271239748f2 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -197,6 +197,7 @@ static struct phy_driver bcm87xx_driver[] = {
197 .phy_id = PHY_ID_BCM8706, 197 .phy_id = PHY_ID_BCM8706,
198 .phy_id_mask = 0xffffffff, 198 .phy_id_mask = 0xffffffff,
199 .name = "Broadcom BCM8706", 199 .name = "Broadcom BCM8706",
200 .features = PHY_10GBIT_FEC_FEATURES,
200 .config_init = bcm87xx_config_init, 201 .config_init = bcm87xx_config_init,
201 .config_aneg = bcm87xx_config_aneg, 202 .config_aneg = bcm87xx_config_aneg,
202 .read_status = bcm87xx_read_status, 203 .read_status = bcm87xx_read_status,
@@ -208,6 +209,7 @@ static struct phy_driver bcm87xx_driver[] = {
208 .phy_id = PHY_ID_BCM8727, 209 .phy_id = PHY_ID_BCM8727,
209 .phy_id_mask = 0xffffffff, 210 .phy_id_mask = 0xffffffff,
210 .name = "Broadcom BCM8727", 211 .name = "Broadcom BCM8727",
212 .features = PHY_10GBIT_FEC_FEATURES,
211 .config_init = bcm87xx_config_init, 213 .config_init = bcm87xx_config_init,
212 .config_aneg = bcm87xx_config_aneg, 214 .config_aneg = bcm87xx_config_aneg,
213 .read_status = bcm87xx_read_status, 215 .read_status = bcm87xx_read_status,
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 8022cd317f62..1a4d04afb7f0 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = {
88 .phy_id = PHY_ID_CS4340, 88 .phy_id = PHY_ID_CS4340,
89 .phy_id_mask = 0xffffffff, 89 .phy_id_mask = 0xffffffff,
90 .name = "Cortina CS4340", 90 .name = "Cortina CS4340",
91 .features = PHY_10GBIT_FEATURES,
91 .config_init = gen10g_config_init, 92 .config_init = gen10g_config_init,
92 .config_aneg = gen10g_config_aneg, 93 .config_aneg = gen10g_config_aneg,
93 .read_status = cortina_read_status, 94 .read_status = cortina_read_status,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 18b41bc345ab..6e8807212aa3 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -898,14 +898,14 @@ static void decode_txts(struct dp83640_private *dp83640,
898 struct phy_txts *phy_txts) 898 struct phy_txts *phy_txts)
899{ 899{
900 struct skb_shared_hwtstamps shhwtstamps; 900 struct skb_shared_hwtstamps shhwtstamps;
901 struct dp83640_skb_info *skb_info;
901 struct sk_buff *skb; 902 struct sk_buff *skb;
902 u64 ns;
903 u8 overflow; 903 u8 overflow;
904 u64 ns;
904 905
905 /* We must already have the skb that triggered this. */ 906 /* We must already have the skb that triggered this. */
906 907again:
907 skb = skb_dequeue(&dp83640->tx_queue); 908 skb = skb_dequeue(&dp83640->tx_queue);
908
909 if (!skb) { 909 if (!skb) {
910 pr_debug("have timestamp but tx_queue empty\n"); 910 pr_debug("have timestamp but tx_queue empty\n");
911 return; 911 return;
@@ -920,6 +920,11 @@ static void decode_txts(struct dp83640_private *dp83640,
920 } 920 }
921 return; 921 return;
922 } 922 }
923 skb_info = (struct dp83640_skb_info *)skb->cb;
924 if (time_after(jiffies, skb_info->tmo)) {
925 kfree_skb(skb);
926 goto again;
927 }
923 928
924 ns = phy2txts(phy_txts); 929 ns = phy2txts(phy_txts);
925 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 930 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1472,6 +1477,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1472static void dp83640_txtstamp(struct phy_device *phydev, 1477static void dp83640_txtstamp(struct phy_device *phydev,
1473 struct sk_buff *skb, int type) 1478 struct sk_buff *skb, int type)
1474{ 1479{
1480 struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
1475 struct dp83640_private *dp83640 = phydev->priv; 1481 struct dp83640_private *dp83640 = phydev->priv;
1476 1482
1477 switch (dp83640->hwts_tx_en) { 1483 switch (dp83640->hwts_tx_en) {
@@ -1484,6 +1490,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1484 /* fall through */ 1490 /* fall through */
1485 case HWTSTAMP_TX_ON: 1491 case HWTSTAMP_TX_ON:
1486 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1492 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1493 skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
1487 skb_queue_tail(&dp83640->tx_queue, skb); 1494 skb_queue_tail(&dp83640->tx_queue, skb);
1488 break; 1495 break;
1489 1496
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a9c7c7f41b0c..abb7876a8776 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
847 847
848 /* SGMII-to-Copper mode initialization */ 848 /* SGMII-to-Copper mode initialization */
849 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { 849 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
850
851 /* Select page 18 */ 850 /* Select page 18 */
852 err = marvell_set_page(phydev, 18); 851 err = marvell_set_page(phydev, 18);
853 if (err < 0) 852 if (err < 0)
@@ -870,21 +869,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
870 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); 869 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
871 if (err < 0) 870 if (err < 0)
872 return err; 871 return err;
873
874 /* There appears to be a bug in the 88e1512 when used in
875 * SGMII to copper mode, where the AN advertisement register
876 * clears the pause bits each time a negotiation occurs.
877 * This means we can never be truely sure what was advertised,
878 * so disable Pause support.
879 */
880 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
881 phydev->supported);
882 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
883 phydev->supported);
884 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
885 phydev->advertising);
886 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
887 phydev->advertising);
888 } 872 }
889 873
890 return m88e1318_config_init(phydev); 874 return m88e1318_config_init(phydev);
@@ -1046,6 +1030,39 @@ static int m88e1145_config_init(struct phy_device *phydev)
1046 return 0; 1030 return 0;
1047} 1031}
1048 1032
1033/* The VOD can be out of specification on link up. Poke an
1034 * undocumented register, in an undocumented page, with a magic value
1035 * to fix this.
1036 */
1037static int m88e6390_errata(struct phy_device *phydev)
1038{
1039 int err;
1040
1041 err = phy_write(phydev, MII_BMCR,
1042 BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
1043 if (err)
1044 return err;
1045
1046 usleep_range(300, 400);
1047
1048 err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
1049 if (err)
1050 return err;
1051
1052 return genphy_soft_reset(phydev);
1053}
1054
1055static int m88e6390_config_aneg(struct phy_device *phydev)
1056{
1057 int err;
1058
1059 err = m88e6390_errata(phydev);
1060 if (err)
1061 return err;
1062
1063 return m88e1510_config_aneg(phydev);
1064}
1065
1049/** 1066/**
1050 * fiber_lpa_mod_linkmode_lpa_t 1067 * fiber_lpa_mod_linkmode_lpa_t
1051 * @advertising: the linkmode advertisement settings 1068 * @advertising: the linkmode advertisement settings
@@ -1402,7 +1419,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
1402 * before enabling it if !phy_interrupt_is_valid() 1419 * before enabling it if !phy_interrupt_is_valid()
1403 */ 1420 */
1404 if (!phy_interrupt_is_valid(phydev)) 1421 if (!phy_interrupt_is_valid(phydev))
1405 phy_read(phydev, MII_M1011_IEVENT); 1422 __phy_read(phydev, MII_M1011_IEVENT);
1406 1423
1407 /* Enable the WOL interrupt */ 1424 /* Enable the WOL interrupt */
1408 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, 1425 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
@@ -2283,7 +2300,7 @@ static struct phy_driver marvell_drivers[] = {
2283 .features = PHY_GBIT_FEATURES, 2300 .features = PHY_GBIT_FEATURES,
2284 .probe = m88e6390_probe, 2301 .probe = m88e6390_probe,
2285 .config_init = &marvell_config_init, 2302 .config_init = &marvell_config_init,
2286 .config_aneg = &m88e1510_config_aneg, 2303 .config_aneg = &m88e6390_config_aneg,
2287 .read_status = &marvell_read_status, 2304 .read_status = &marvell_read_status,
2288 .ack_interrupt = &marvell_ack_interrupt, 2305 .ack_interrupt = &marvell_ack_interrupt,
2289 .config_intr = &marvell_config_intr, 2306 .config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 82ab6ed3b74e..6bac602094bd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -26,6 +26,8 @@
26#include <linux/marvell_phy.h> 26#include <linux/marvell_phy.h>
27#include <linux/phy.h> 27#include <linux/phy.h>
28 28
29#define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK 0x01e0
30
29enum { 31enum {
30 MV_PCS_BASE_T = 0x0000, 32 MV_PCS_BASE_T = 0x0000,
31 MV_PCS_BASE_R = 0x1000, 33 MV_PCS_BASE_R = 0x1000,
@@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev)
386 else 388 else
387 reg = 0; 389 reg = 0;
388 390
391 /* Make sure we clear unsupported 2.5G/5G advertising */
389 ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, 392 ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
390 MDIO_AN_10GBT_CTRL_ADV10G, reg); 393 MDIO_AN_10GBT_CTRL_ADV10G |
394 MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg);
391 if (ret < 0) 395 if (ret < 0)
392 return ret; 396 return ret;
393 if (ret > 0) 397 if (ret > 0)
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c
index b03fedd6c1d8..287f3ccf1da1 100644
--- a/drivers/net/phy/mdio-hisi-femac.c
+++ b/drivers/net/phy/mdio-hisi-femac.c
@@ -1,20 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Hisilicon Fast Ethernet MDIO Bus Driver 3 * Hisilicon Fast Ethernet MDIO Bus Driver
3 * 4 *
4 * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. 5 * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 6 */
19 7
20#include <linux/clk.h> 8#include <linux/clk.h>
@@ -163,4 +151,4 @@ module_platform_driver(hisi_femac_mdio_driver);
163 151
164MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver"); 152MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver");
165MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>"); 153MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
166MODULE_LICENSE("GPL v2"); 154MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e59a8419b17..7368616286ae 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
379 err = device_register(&bus->dev); 379 err = device_register(&bus->dev);
380 if (err) { 380 if (err) {
381 pr_err("mii_bus %s failed to register\n", bus->id); 381 pr_err("mii_bus %s failed to register\n", bus->id);
382 put_device(&bus->dev);
383 return -EINVAL; 382 return -EINVAL;
384 } 383 }
385 384
@@ -390,6 +389,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
390 if (IS_ERR(gpiod)) { 389 if (IS_ERR(gpiod)) {
391 dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", 390 dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
392 bus->id); 391 bus->id);
392 device_del(&bus->dev);
393 return PTR_ERR(gpiod); 393 return PTR_ERR(gpiod);
394 } else if (gpiod) { 394 } else if (gpiod) {
395 bus->reset_gpiod = gpiod; 395 bus->reset_gpiod = gpiod;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index b03bcf2c388a..3ddaf9595697 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = {
233 .name = "Meson GXL Internal PHY", 233 .name = "Meson GXL Internal PHY",
234 .features = PHY_BASIC_FEATURES, 234 .features = PHY_BASIC_FEATURES,
235 .flags = PHY_IS_INTERNAL, 235 .flags = PHY_IS_INTERNAL,
236 .soft_reset = genphy_soft_reset,
236 .config_init = meson_gxl_config_init, 237 .config_init = meson_gxl_config_init,
237 .aneg_done = genphy_aneg_done, 238 .aneg_done = genphy_aneg_done,
238 .read_status = meson_gxl_read_status, 239 .read_status = meson_gxl_read_status,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c33384710d26..b1f959935f50 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1070,6 +1070,7 @@ static struct phy_driver ksphy_driver[] = {
1070 .driver_data = &ksz9021_type, 1070 .driver_data = &ksz9021_type,
1071 .probe = kszphy_probe, 1071 .probe = kszphy_probe,
1072 .config_init = ksz9031_config_init, 1072 .config_init = ksz9031_config_init,
1073 .soft_reset = genphy_soft_reset,
1073 .read_status = ksz9031_read_status, 1074 .read_status = ksz9031_read_status,
1074 .ack_interrupt = kszphy_ack_interrupt, 1075 .ack_interrupt = kszphy_ack_interrupt,
1075 .config_intr = kszphy_config_intr, 1076 .config_intr = kszphy_config_intr,
@@ -1098,6 +1099,7 @@ static struct phy_driver ksphy_driver[] = {
1098 .phy_id = PHY_ID_KSZ8873MLL, 1099 .phy_id = PHY_ID_KSZ8873MLL,
1099 .phy_id_mask = MICREL_PHY_ID_MASK, 1100 .phy_id_mask = MICREL_PHY_ID_MASK,
1100 .name = "Micrel KSZ8873MLL Switch", 1101 .name = "Micrel KSZ8873MLL Switch",
1102 .features = PHY_BASIC_FEATURES,
1101 .config_init = kszphy_config_init, 1103 .config_init = kszphy_config_init,
1102 .config_aneg = ksz8873mll_config_aneg, 1104 .config_aneg = ksz8873mll_config_aneg,
1103 .read_status = ksz8873mll_read_status, 1105 .read_status = ksz8873mll_read_status,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d33e7b3caf03..c5675df5fc6f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -543,13 +543,6 @@ int phy_start_aneg(struct phy_device *phydev)
543 543
544 mutex_lock(&phydev->lock); 544 mutex_lock(&phydev->lock);
545 545
546 if (!__phy_is_started(phydev)) {
547 WARN(1, "called from state %s\n",
548 phy_state_to_str(phydev->state));
549 err = -EBUSY;
550 goto out_unlock;
551 }
552
553 if (AUTONEG_DISABLE == phydev->autoneg) 546 if (AUTONEG_DISABLE == phydev->autoneg)
554 phy_sanitize_settings(phydev); 547 phy_sanitize_settings(phydev);
555 548
@@ -560,11 +553,13 @@ int phy_start_aneg(struct phy_device *phydev)
560 if (err < 0) 553 if (err < 0)
561 goto out_unlock; 554 goto out_unlock;
562 555
563 if (phydev->autoneg == AUTONEG_ENABLE) { 556 if (phy_is_started(phydev)) {
564 err = phy_check_link_status(phydev); 557 if (phydev->autoneg == AUTONEG_ENABLE) {
565 } else { 558 err = phy_check_link_status(phydev);
566 phydev->state = PHY_FORCING; 559 } else {
567 phydev->link_timeout = PHY_FORCE_TIMEOUT; 560 phydev->state = PHY_FORCING;
561 phydev->link_timeout = PHY_FORCE_TIMEOUT;
562 }
568 } 563 }
569 564
570out_unlock: 565out_unlock:
@@ -714,7 +709,7 @@ void phy_stop_machine(struct phy_device *phydev)
714 cancel_delayed_work_sync(&phydev->state_queue); 709 cancel_delayed_work_sync(&phydev->state_queue);
715 710
716 mutex_lock(&phydev->lock); 711 mutex_lock(&phydev->lock);
717 if (__phy_is_started(phydev)) 712 if (phy_is_started(phydev))
718 phydev->state = PHY_UP; 713 phydev->state = PHY_UP;
719 mutex_unlock(&phydev->lock); 714 mutex_unlock(&phydev->lock);
720} 715}
@@ -767,9 +762,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
767{ 762{
768 struct phy_device *phydev = phy_dat; 763 struct phy_device *phydev = phy_dat;
769 764
770 if (!phy_is_started(phydev))
771 return IRQ_NONE; /* It can't be ours. */
772
773 if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) 765 if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
774 return IRQ_NONE; 766 return IRQ_NONE;
775 767
@@ -847,15 +839,14 @@ EXPORT_SYMBOL(phy_stop_interrupts);
847 */ 839 */
848void phy_stop(struct phy_device *phydev) 840void phy_stop(struct phy_device *phydev)
849{ 841{
850 mutex_lock(&phydev->lock); 842 if (!phy_is_started(phydev)) {
851
852 if (!__phy_is_started(phydev)) {
853 WARN(1, "called from state %s\n", 843 WARN(1, "called from state %s\n",
854 phy_state_to_str(phydev->state)); 844 phy_state_to_str(phydev->state));
855 mutex_unlock(&phydev->lock);
856 return; 845 return;
857 } 846 }
858 847
848 mutex_lock(&phydev->lock);
849
859 if (phy_interrupt_is_valid(phydev)) 850 if (phy_interrupt_is_valid(phydev))
860 phy_disable_interrupts(phydev); 851 phy_disable_interrupts(phydev);
861 852
@@ -994,8 +985,10 @@ void phy_state_machine(struct work_struct *work)
994 * state machine would be pointless and possibly error prone when 985 * state machine would be pointless and possibly error prone when
995 * called from phy_disconnect() synchronously. 986 * called from phy_disconnect() synchronously.
996 */ 987 */
988 mutex_lock(&phydev->lock);
997 if (phy_polling_mode(phydev) && phy_is_started(phydev)) 989 if (phy_polling_mode(phydev) && phy_is_started(phydev))
998 phy_queue_state_machine(phydev, PHY_STATE_TIME); 990 phy_queue_state_machine(phydev, PHY_STATE_TIME);
991 mutex_unlock(&phydev->lock);
999} 992}
1000 993
1001/** 994/**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 51990002d495..46c86725a693 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -61,6 +61,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
61__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; 61__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
62EXPORT_SYMBOL_GPL(phy_10gbit_features); 62EXPORT_SYMBOL_GPL(phy_10gbit_features);
63 63
64__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
65EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
66
64static const int phy_basic_ports_array[] = { 67static const int phy_basic_ports_array[] = {
65 ETHTOOL_LINK_MODE_Autoneg_BIT, 68 ETHTOOL_LINK_MODE_Autoneg_BIT,
66 ETHTOOL_LINK_MODE_TP_BIT, 69 ETHTOOL_LINK_MODE_TP_BIT,
@@ -109,6 +112,11 @@ const int phy_10gbit_features_array[1] = {
109}; 112};
110EXPORT_SYMBOL_GPL(phy_10gbit_features_array); 113EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
111 114
115const int phy_10gbit_fec_features_array[1] = {
116 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
117};
118EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array);
119
112__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; 120__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
113EXPORT_SYMBOL_GPL(phy_10gbit_full_features); 121EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
114 122
@@ -191,6 +199,10 @@ static void features_init(void)
191 linkmode_set_bit_array(phy_10gbit_full_features_array, 199 linkmode_set_bit_array(phy_10gbit_full_features_array,
192 ARRAY_SIZE(phy_10gbit_full_features_array), 200 ARRAY_SIZE(phy_10gbit_full_features_array),
193 phy_10gbit_full_features); 201 phy_10gbit_full_features);
202 /* 10G FEC only */
203 linkmode_set_bit_array(phy_10gbit_fec_features_array,
204 ARRAY_SIZE(phy_10gbit_fec_features_array),
205 phy_10gbit_fec_features);
194} 206}
195 207
196void phy_device_free(struct phy_device *phydev) 208void phy_device_free(struct phy_device *phydev)
@@ -2243,6 +2255,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
2243{ 2255{
2244 int retval; 2256 int retval;
2245 2257
2258 if (WARN_ON(!new_driver->features)) {
2259 pr_err("%s: Driver features are missing\n", new_driver->name);
2260 return -EINVAL;
2261 }
2262
2246 new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; 2263 new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
2247 new_driver->mdiodrv.driver.name = new_driver->name; 2264 new_driver->mdiodrv.driver.name = new_driver->name;
2248 new_driver->mdiodrv.driver.bus = &mdio_bus_type; 2265 new_driver->mdiodrv.driver.bus = &mdio_bus_type;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index e7becc7379d7..938803237d7f 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -474,6 +474,17 @@ static void phylink_run_resolve(struct phylink *pl)
474 queue_work(system_power_efficient_wq, &pl->resolve); 474 queue_work(system_power_efficient_wq, &pl->resolve);
475} 475}
476 476
477static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
478{
479 unsigned long state = pl->phylink_disable_state;
480
481 set_bit(bit, &pl->phylink_disable_state);
482 if (state == 0) {
483 queue_work(system_power_efficient_wq, &pl->resolve);
484 flush_work(&pl->resolve);
485 }
486}
487
477static void phylink_fixed_poll(struct timer_list *t) 488static void phylink_fixed_poll(struct timer_list *t)
478{ 489{
479 struct phylink *pl = container_of(t, struct phylink, link_poll); 490 struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -924,9 +935,7 @@ void phylink_stop(struct phylink *pl)
924 if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) 935 if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
925 del_timer_sync(&pl->link_poll); 936 del_timer_sync(&pl->link_poll);
926 937
927 set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); 938 phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
928 queue_work(system_power_efficient_wq, &pl->resolve);
929 flush_work(&pl->resolve);
930} 939}
931EXPORT_SYMBOL_GPL(phylink_stop); 940EXPORT_SYMBOL_GPL(phylink_stop);
932 941
@@ -1632,9 +1641,7 @@ static void phylink_sfp_link_down(void *upstream)
1632 1641
1633 ASSERT_RTNL(); 1642 ASSERT_RTNL();
1634 1643
1635 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); 1644 phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
1636 queue_work(system_power_efficient_wq, &pl->resolve);
1637 flush_work(&pl->resolve);
1638} 1645}
1639 1646
1640static void phylink_sfp_link_up(void *upstream) 1647static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index c6010fb1aa0f..cb4a23041a94 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = {
282 .name = "RTL8366RB Gigabit Ethernet", 282 .name = "RTL8366RB Gigabit Ethernet",
283 .features = PHY_GBIT_FEATURES, 283 .features = PHY_GBIT_FEATURES,
284 .config_init = &rtl8366rb_config_init, 284 .config_init = &rtl8366rb_config_init,
285 /* These interrupts are handled by the irq controller
286 * embedded inside the RTL8366RB, they get unmasked when the
287 * irq is requested and ACKed by reading the status register,
288 * which is done by the irqchip code.
289 */
290 .ack_interrupt = genphy_no_ack_interrupt,
291 .config_intr = genphy_no_config_intr,
285 .suspend = genphy_suspend, 292 .suspend = genphy_suspend,
286 .resume = genphy_resume, 293 .resume = genphy_resume,
287 }, 294 },
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index f1da70b9b55f..95abf7072f32 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/** 2/**
2 * drivers/net/phy/rockchip.c 3 * drivers/net/phy/rockchip.c
3 * 4 *
@@ -6,12 +7,6 @@
6 * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd 7 * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
7 * 8 *
8 * David Wu <david.wu@rock-chips.com> 9 * David Wu <david.wu@rock-chips.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 */ 10 */
16 11
17#include <linux/ethtool.h> 12#include <linux/ethtool.h>
@@ -229,4 +224,4 @@ MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl);
229 224
230MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>"); 225MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>");
231MODULE_DESCRIPTION("Rockchip Ethernet PHY driver"); 226MODULE_DESCRIPTION("Rockchip Ethernet PHY driver");
232MODULE_LICENSE("GPL v2"); 227MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index ad9db652874d..fef701bfad62 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
347 return ret; 347 return ret;
348 } 348 }
349 } 349 }
350 bus->socket_ops->attach(bus->sfp);
350 if (bus->started) 351 if (bus->started)
351 bus->socket_ops->start(bus->sfp); 352 bus->socket_ops->start(bus->sfp);
352 bus->netdev->sfp_bus = bus; 353 bus->netdev->sfp_bus = bus;
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
362 if (bus->registered) { 363 if (bus->registered) {
363 if (bus->started) 364 if (bus->started)
364 bus->socket_ops->stop(bus->sfp); 365 bus->socket_ops->stop(bus->sfp);
366 bus->socket_ops->detach(bus->sfp);
365 if (bus->phydev && ops && ops->disconnect_phy) 367 if (bus->phydev && ops && ops->disconnect_phy)
366 ops->disconnect_phy(bus->upstream); 368 ops->disconnect_phy(bus->upstream);
367 } 369 }
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index fd8bb998ae52..68c8fbf099f8 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -184,6 +184,7 @@ struct sfp {
184 184
185 struct gpio_desc *gpio[GPIO_MAX]; 185 struct gpio_desc *gpio[GPIO_MAX];
186 186
187 bool attached;
187 unsigned int state; 188 unsigned int state;
188 struct delayed_work poll; 189 struct delayed_work poll;
189 struct delayed_work timeout; 190 struct delayed_work timeout;
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
1475 */ 1476 */
1476 switch (sfp->sm_mod_state) { 1477 switch (sfp->sm_mod_state) {
1477 default: 1478 default:
1478 if (event == SFP_E_INSERT) { 1479 if (event == SFP_E_INSERT && sfp->attached) {
1479 sfp_module_tx_disable(sfp); 1480 sfp_module_tx_disable(sfp);
1480 sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); 1481 sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
1481 } 1482 }
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
1607 mutex_unlock(&sfp->sm_mutex); 1608 mutex_unlock(&sfp->sm_mutex);
1608} 1609}
1609 1610
1611static void sfp_attach(struct sfp *sfp)
1612{
1613 sfp->attached = true;
1614 if (sfp->state & SFP_F_PRESENT)
1615 sfp_sm_event(sfp, SFP_E_INSERT);
1616}
1617
1618static void sfp_detach(struct sfp *sfp)
1619{
1620 sfp->attached = false;
1621 sfp_sm_event(sfp, SFP_E_REMOVE);
1622}
1623
1610static void sfp_start(struct sfp *sfp) 1624static void sfp_start(struct sfp *sfp)
1611{ 1625{
1612 sfp_sm_event(sfp, SFP_E_DEV_UP); 1626 sfp_sm_event(sfp, SFP_E_DEV_UP);
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
1667} 1681}
1668 1682
1669static const struct sfp_socket_ops sfp_module_ops = { 1683static const struct sfp_socket_ops sfp_module_ops = {
1684 .attach = sfp_attach,
1685 .detach = sfp_detach,
1670 .start = sfp_start, 1686 .start = sfp_start,
1671 .stop = sfp_stop, 1687 .stop = sfp_stop,
1672 .module_info = sfp_module_info, 1688 .module_info = sfp_module_info,
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
1834 dev_info(sfp->dev, "Host maximum power %u.%uW\n", 1850 dev_info(sfp->dev, "Host maximum power %u.%uW\n",
1835 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); 1851 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
1836 1852
1837 sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
1838 if (!sfp->sfp_bus)
1839 return -ENOMEM;
1840
1841 /* Get the initial state, and always signal TX disable, 1853 /* Get the initial state, and always signal TX disable,
1842 * since the network interface will not be up. 1854 * since the network interface will not be up.
1843 */ 1855 */
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
1848 sfp->state |= SFP_F_RATE_SELECT; 1860 sfp->state |= SFP_F_RATE_SELECT;
1849 sfp_set_state(sfp, sfp->state); 1861 sfp_set_state(sfp, sfp->state);
1850 sfp_module_tx_disable(sfp); 1862 sfp_module_tx_disable(sfp);
1851 rtnl_lock();
1852 if (sfp->state & SFP_F_PRESENT)
1853 sfp_sm_event(sfp, SFP_E_INSERT);
1854 rtnl_unlock();
1855 1863
1856 for (i = 0; i < GPIO_MAX; i++) { 1864 for (i = 0; i < GPIO_MAX; i++) {
1857 if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) 1865 if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
1884 dev_warn(sfp->dev, 1892 dev_warn(sfp->dev,
1885 "No tx_disable pin: SFP modules will always be emitting.\n"); 1893 "No tx_disable pin: SFP modules will always be emitting.\n");
1886 1894
1895 sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
1896 if (!sfp->sfp_bus)
1897 return -ENOMEM;
1898
1887 return 0; 1899 return 0;
1888} 1900}
1889 1901
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 31b0acf337e2..64f54b0bbd8c 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -7,6 +7,8 @@
7struct sfp; 7struct sfp;
8 8
9struct sfp_socket_ops { 9struct sfp_socket_ops {
10 void (*attach)(struct sfp *sfp);
11 void (*detach)(struct sfp *sfp);
10 void (*start)(struct sfp *sfp); 12 void (*start)(struct sfp *sfp);
11 void (*stop)(struct sfp *sfp); 13 void (*stop)(struct sfp *sfp);
12 int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); 14 int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 22f3bdd8206c..91247182bc52 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = {
80 .phy_id = PHY_ID_TN2020, 80 .phy_id = PHY_ID_TN2020,
81 .phy_id_mask = 0xffffffff, 81 .phy_id_mask = 0xffffffff,
82 .name = "Teranetics TN2020", 82 .name = "Teranetics TN2020",
83 .features = PHY_10GBIT_FEATURES,
83 .soft_reset = gen10g_no_soft_reset, 84 .soft_reset = gen10g_no_soft_reset,
84 .aneg_done = teranetics_aneg_done, 85 .aneg_done = teranetics_aneg_done,
85 .config_init = gen10g_config_init, 86 .config_init = gen10g_config_init,
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 74a8782313cf..bd6084e315de 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
44 u16 val = 0; 44 u16 val = 0;
45 int err; 45 int err;
46 46
47 err = priv->phy_drv->read_status(phydev); 47 if (priv->phy_drv->read_status)
48 err = priv->phy_drv->read_status(phydev);
49 else
50 err = genphy_read_status(phydev);
48 if (err < 0) 51 if (err < 0)
49 return err; 52 return err;
50 53
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 62dc564b251d..f22639f0116a 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
445 if (pskb_trim_rcsum(skb, len)) 445 if (pskb_trim_rcsum(skb, len))
446 goto drop; 446 goto drop;
447 447
448 ph = pppoe_hdr(skb);
448 pn = pppoe_pernet(dev_net(dev)); 449 pn = pppoe_pernet(dev_net(dev));
449 450
450 /* Note that get_item does a sock_hold(), so sk_pppox(po) 451 /* Note that get_item does a sock_hold(), so sk_pppox(po)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index afd9d25d1992..6ce3f666d142 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
256 } 256 }
257} 257}
258 258
259static bool __team_option_inst_tmp_find(const struct list_head *opts,
260 const struct team_option_inst *needle)
261{
262 struct team_option_inst *opt_inst;
263
264 list_for_each_entry(opt_inst, opts, tmp_list)
265 if (opt_inst == needle)
266 return true;
267 return false;
268}
269
270static int __team_options_register(struct team *team, 259static int __team_options_register(struct team *team,
271 const struct team_option *option, 260 const struct team_option *option,
272 size_t option_count) 261 size_t option_count)
@@ -1267,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1267 list_add_tail_rcu(&port->list, &team->port_list); 1256 list_add_tail_rcu(&port->list, &team->port_list);
1268 team_port_enable(team, port); 1257 team_port_enable(team, port);
1269 __team_compute_features(team); 1258 __team_compute_features(team);
1270 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); 1259 __team_port_change_port_added(port, !!netif_oper_up(port_dev));
1271 __team_options_change_check(team); 1260 __team_options_change_check(team);
1272 1261
1273 netdev_info(dev, "Port device %s added\n", portname); 1262 netdev_info(dev, "Port device %s added\n", portname);
@@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2460 int err = 0; 2449 int err = 0;
2461 int i; 2450 int i;
2462 struct nlattr *nl_option; 2451 struct nlattr *nl_option;
2463 LIST_HEAD(opt_inst_list);
2464 2452
2465 rtnl_lock(); 2453 rtnl_lock();
2466 2454
@@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2480 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; 2468 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2481 struct nlattr *attr; 2469 struct nlattr *attr;
2482 struct nlattr *attr_data; 2470 struct nlattr *attr_data;
2471 LIST_HEAD(opt_inst_list);
2483 enum team_option_type opt_type; 2472 enum team_option_type opt_type;
2484 int opt_port_ifindex = 0; /* != 0 for per-port options */ 2473 int opt_port_ifindex = 0; /* != 0 for per-port options */
2485 u32 opt_array_index = 0; 2474 u32 opt_array_index = 0;
@@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2584 if (err) 2573 if (err)
2585 goto team_put; 2574 goto team_put;
2586 opt_inst->changed = true; 2575 opt_inst->changed = true;
2587
2588 /* dumb/evil user-space can send us duplicate opt,
2589 * keep only the last one
2590 */
2591 if (__team_option_inst_tmp_find(&opt_inst_list,
2592 opt_inst))
2593 continue;
2594
2595 list_add(&opt_inst->tmp_list, &opt_inst_list); 2576 list_add(&opt_inst->tmp_list, &opt_inst_list);
2596 } 2577 }
2597 if (!opt_found) { 2578 if (!opt_found) {
2598 err = -ENOENT; 2579 err = -ENOENT;
2599 goto team_put; 2580 goto team_put;
2600 } 2581 }
2601 }
2602 2582
2603 err = team_nl_send_event_options_get(team, &opt_inst_list); 2583 err = team_nl_send_event_options_get(team, &opt_inst_list);
2584 if (err)
2585 break;
2586 }
2604 2587
2605team_put: 2588team_put:
2606 team_nl_team_put(team); 2589 team_nl_team_put(team);
@@ -2932,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused,
2932 2915
2933 switch (event) { 2916 switch (event) {
2934 case NETDEV_UP: 2917 case NETDEV_UP:
2935 if (netif_carrier_ok(dev)) 2918 if (netif_oper_up(dev))
2936 team_port_change_check(port, true); 2919 team_port_change_check(port, true);
2937 break; 2920 break;
2938 case NETDEV_DOWN: 2921 case NETDEV_DOWN:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a4fdad475594..fed298c0cb39 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -856,10 +856,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
856 err = 0; 856 err = 0;
857 } 857 }
858 858
859 rcu_assign_pointer(tfile->tun, tun);
860 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
861 tun->numqueues++;
862
863 if (tfile->detached) { 859 if (tfile->detached) {
864 tun_enable_queue(tfile); 860 tun_enable_queue(tfile);
865 } else { 861 } else {
@@ -870,12 +866,18 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
870 if (rtnl_dereference(tun->xdp_prog)) 866 if (rtnl_dereference(tun->xdp_prog))
871 sock_set_flag(&tfile->sk, SOCK_XDP); 867 sock_set_flag(&tfile->sk, SOCK_XDP);
872 868
873 tun_set_real_num_queues(tun);
874
875 /* device is allowed to go away first, so no need to hold extra 869 /* device is allowed to go away first, so no need to hold extra
876 * refcnt. 870 * refcnt.
877 */ 871 */
878 872
873 /* Publish tfile->tun and tun->tfiles only after we've fully
874 * initialized tfile; otherwise we risk using half-initialized
875 * object.
876 */
877 rcu_assign_pointer(tfile->tun, tun);
878 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
879 tun->numqueues++;
880 tun_set_real_num_queues(tun);
879out: 881out:
880 return err; 882 return err;
881} 883}
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 57f1c94fca0b..820a2fe7d027 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1287,6 +1287,20 @@ static const struct driver_info asix112_info = {
1287 1287
1288#undef ASIX112_DESC 1288#undef ASIX112_DESC
1289 1289
1290static const struct driver_info trendnet_info = {
1291 .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter",
1292 .bind = aqc111_bind,
1293 .unbind = aqc111_unbind,
1294 .status = aqc111_status,
1295 .link_reset = aqc111_link_reset,
1296 .reset = aqc111_reset,
1297 .stop = aqc111_stop,
1298 .flags = FLAG_ETHER | FLAG_FRAMING_AX |
1299 FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
1300 .rx_fixup = aqc111_rx_fixup,
1301 .tx_fixup = aqc111_tx_fixup,
1302};
1303
1290static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) 1304static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
1291{ 1305{
1292 struct usbnet *dev = usb_get_intfdata(intf); 1306 struct usbnet *dev = usb_get_intfdata(intf);
@@ -1440,6 +1454,7 @@ static const struct usb_device_id products[] = {
1440 {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, 1454 {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)},
1441 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, 1455 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
1442 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, 1456 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
1457 {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
1443 { },/* END */ 1458 { },/* END */
1444}; 1459};
1445MODULE_DEVICE_TABLE(usb, products); 1460MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b654f05b2ccd..3d93993e74da 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
739 asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); 739 asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
740 chipcode &= AX_CHIPCODE_MASK; 740 chipcode &= AX_CHIPCODE_MASK;
741 741
742 (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : 742 ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
743 ax88772a_hw_reset(dev, 0); 743 ax88772a_hw_reset(dev, 0);
744
745 if (ret < 0) {
746 netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
747 return ret;
748 }
744 749
745 /* Read PHYID register *AFTER* the PHY was reset properly */ 750 /* Read PHYID register *AFTER* the PHY was reset properly */
746 phyid = asix_get_phyid(dev); 751 phyid = asix_get_phyid(dev);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index b3b3c05903a1..5512a1038721 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -179,10 +179,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
179 * probed with) and a slave/data interface; union 179 * probed with) and a slave/data interface; union
180 * descriptors sort this all out. 180 * descriptors sort this all out.
181 */ 181 */
182 info->control = usb_ifnum_to_if(dev->udev, 182 info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0);
183 info->u->bMasterInterface0); 183 info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0);
184 info->data = usb_ifnum_to_if(dev->udev,
185 info->u->bSlaveInterface0);
186 if (!info->control || !info->data) { 184 if (!info->control || !info->data) {
187 dev_dbg(&intf->dev, 185 dev_dbg(&intf->dev,
188 "master #%u/%p slave #%u/%p\n", 186 "master #%u/%p slave #%u/%p\n",
@@ -216,18 +214,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
216 /* a data interface altsetting does the real i/o */ 214 /* a data interface altsetting does the real i/o */
217 d = &info->data->cur_altsetting->desc; 215 d = &info->data->cur_altsetting->desc;
218 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { 216 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) {
219 dev_dbg(&intf->dev, "slave class %u\n", 217 dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass);
220 d->bInterfaceClass);
221 goto bad_desc; 218 goto bad_desc;
222 } 219 }
223skip: 220skip:
224 if ( rndis && 221 if (rndis && header.usb_cdc_acm_descriptor &&
225 header.usb_cdc_acm_descriptor && 222 header.usb_cdc_acm_descriptor->bmCapabilities) {
226 header.usb_cdc_acm_descriptor->bmCapabilities) { 223 dev_dbg(&intf->dev,
227 dev_dbg(&intf->dev, 224 "ACM capabilities %02x, not really RNDIS?\n",
228 "ACM capabilities %02x, not really RNDIS?\n", 225 header.usb_cdc_acm_descriptor->bmCapabilities);
229 header.usb_cdc_acm_descriptor->bmCapabilities); 226 goto bad_desc;
230 goto bad_desc;
231 } 227 }
232 228
233 if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { 229 if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
@@ -238,7 +234,7 @@ skip:
238 } 234 }
239 235
240 if (header.usb_cdc_mdlm_desc && 236 if (header.usb_cdc_mdlm_desc &&
241 memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { 237 memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) {
242 dev_dbg(&intf->dev, "GUID doesn't match\n"); 238 dev_dbg(&intf->dev, "GUID doesn't match\n");
243 goto bad_desc; 239 goto bad_desc;
244 } 240 }
@@ -302,7 +298,7 @@ skip:
302 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { 298 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) {
303 struct usb_endpoint_descriptor *desc; 299 struct usb_endpoint_descriptor *desc;
304 300
305 dev->status = &info->control->cur_altsetting->endpoint [0]; 301 dev->status = &info->control->cur_altsetting->endpoint[0];
306 desc = &dev->status->desc; 302 desc = &dev->status->desc;
307 if (!usb_endpoint_is_int_in(desc) || 303 if (!usb_endpoint_is_int_in(desc) ||
308 (le16_to_cpu(desc->wMaxPacketSize) 304 (le16_to_cpu(desc->wMaxPacketSize)
@@ -847,6 +843,14 @@ static const struct usb_device_id products[] = {
847 .driver_info = 0, 843 .driver_info = 0,
848}, 844},
849 845
846/* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */
847{
848 USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM,
849 USB_CDC_SUBCLASS_ETHERNET,
850 USB_CDC_PROTO_NONE),
851 .driver_info = 0,
852},
853
850/* WHITELIST!!! 854/* WHITELIST!!!
851 * 855 *
852 * CDC Ether uses two interfaces, not necessarily consecutive. 856 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 774e1ff01c9a..18af2f8eee96 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev)
123 dev->addr_len = 0; 123 dev->addr_len = 0;
124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
125 dev->netdev_ops = &qmimux_netdev_ops; 125 dev->netdev_ops = &qmimux_netdev_ops;
126 dev->mtu = 1500;
126 dev->needs_free_netdev = true; 127 dev->needs_free_netdev = true;
127} 128}
128 129
@@ -1200,8 +1201,8 @@ static const struct usb_device_id products[] = {
1200 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1201 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1201 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 1202 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
1202 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 1203 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
1203 {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ 1204 {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
1204 {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ 1205 {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
1205 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 1206 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
1206 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 1207 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
1207 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ 1208 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 60dd1ec1665f..86c8c64fbb0f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -557,6 +557,7 @@ enum spd_duplex {
557/* MAC PASSTHRU */ 557/* MAC PASSTHRU */
558#define AD_MASK 0xfee0 558#define AD_MASK 0xfee0
559#define BND_MASK 0x0004 559#define BND_MASK 0x0004
560#define BD_MASK 0x0001
560#define EFUSE 0xcfdb 561#define EFUSE 0xcfdb
561#define PASS_THRU_MASK 0x1 562#define PASS_THRU_MASK 0x1
562 563
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
1176 return -ENODEV; 1177 return -ENODEV;
1177 } 1178 }
1178 } else { 1179 } else {
1179 /* test for RTL8153-BND */ 1180 /* test for RTL8153-BND and RTL8153-BD */
1180 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); 1181 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
1181 if ((ocp_data & BND_MASK) == 0) { 1182 if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
1182 netif_dbg(tp, probe, tp->netdev, 1183 netif_dbg(tp, probe, tp->netdev,
1183 "Invalid variant for MAC pass through\n"); 1184 "Invalid variant for MAC pass through\n");
1184 return -ENODEV; 1185 return -ENODEV;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 023725086046..4cfceb789eea 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
57#define VIRTIO_XDP_TX BIT(0) 57#define VIRTIO_XDP_TX BIT(0)
58#define VIRTIO_XDP_REDIR BIT(1) 58#define VIRTIO_XDP_REDIR BIT(1)
59 59
60#define VIRTIO_XDP_FLAG BIT(0)
61
60/* RX packet size EWMA. The average packet size is used to determine the packet 62/* RX packet size EWMA. The average packet size is used to determine the packet
61 * buffer size when refilling RX rings. As the entire RX ring may be refilled 63 * buffer size when refilling RX rings. As the entire RX ring may be refilled
62 * at once, the weight is chosen so that the EWMA will be insensitive to short- 64 * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -252,6 +254,21 @@ struct padded_vnet_hdr {
252 char padding[4]; 254 char padding[4];
253}; 255};
254 256
257static bool is_xdp_frame(void *ptr)
258{
259 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
260}
261
262static void *xdp_to_ptr(struct xdp_frame *ptr)
263{
264 return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
265}
266
267static struct xdp_frame *ptr_to_xdp(void *ptr)
268{
269 return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
270}
271
255/* Converting between virtqueue no. and kernel tx/rx queue no. 272/* Converting between virtqueue no. and kernel tx/rx queue no.
256 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq 273 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
257 */ 274 */
@@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
462 479
463 sg_init_one(sq->sg, xdpf->data, xdpf->len); 480 sg_init_one(sq->sg, xdpf->data, xdpf->len);
464 481
465 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); 482 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
483 GFP_ATOMIC);
466 if (unlikely(err)) 484 if (unlikely(err))
467 return -ENOSPC; /* Caller handle free/refcnt */ 485 return -ENOSPC; /* Caller handle free/refcnt */
468 486
@@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev,
482{ 500{
483 struct virtnet_info *vi = netdev_priv(dev); 501 struct virtnet_info *vi = netdev_priv(dev);
484 struct receive_queue *rq = vi->rq; 502 struct receive_queue *rq = vi->rq;
485 struct xdp_frame *xdpf_sent;
486 struct bpf_prog *xdp_prog; 503 struct bpf_prog *xdp_prog;
487 struct send_queue *sq; 504 struct send_queue *sq;
488 unsigned int len; 505 unsigned int len;
506 int packets = 0;
507 int bytes = 0;
489 int drops = 0; 508 int drops = 0;
490 int kicks = 0; 509 int kicks = 0;
491 int ret, err; 510 int ret, err;
511 void *ptr;
492 int i; 512 int i;
493 513
494 sq = virtnet_xdp_sq(vi);
495
496 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
497 ret = -EINVAL;
498 drops = n;
499 goto out;
500 }
501
502 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this 514 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
503 * indicate XDP resources have been successfully allocated. 515 * indicate XDP resources have been successfully allocated.
504 */ 516 */
505 xdp_prog = rcu_dereference(rq->xdp_prog); 517 xdp_prog = rcu_dereference(rq->xdp_prog);
506 if (!xdp_prog) { 518 if (!xdp_prog)
507 ret = -ENXIO; 519 return -ENXIO;
520
521 sq = virtnet_xdp_sq(vi);
522
523 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
524 ret = -EINVAL;
508 drops = n; 525 drops = n;
509 goto out; 526 goto out;
510 } 527 }
511 528
512 /* Free up any pending old buffers before queueing new ones. */ 529 /* Free up any pending old buffers before queueing new ones. */
513 while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) 530 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
514 xdp_return_frame(xdpf_sent); 531 if (likely(is_xdp_frame(ptr))) {
532 struct xdp_frame *frame = ptr_to_xdp(ptr);
533
534 bytes += frame->len;
535 xdp_return_frame(frame);
536 } else {
537 struct sk_buff *skb = ptr;
538
539 bytes += skb->len;
540 napi_consume_skb(skb, false);
541 }
542 packets++;
543 }
515 544
516 for (i = 0; i < n; i++) { 545 for (i = 0; i < n; i++) {
517 struct xdp_frame *xdpf = frames[i]; 546 struct xdp_frame *xdpf = frames[i];
@@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
530 } 559 }
531out: 560out:
532 u64_stats_update_begin(&sq->stats.syncp); 561 u64_stats_update_begin(&sq->stats.syncp);
562 sq->stats.bytes += bytes;
563 sq->stats.packets += packets;
533 sq->stats.xdp_tx += n; 564 sq->stats.xdp_tx += n;
534 sq->stats.xdp_tx_drops += drops; 565 sq->stats.xdp_tx_drops += drops;
535 sq->stats.kicks += kicks; 566 sq->stats.kicks += kicks;
@@ -1330,20 +1361,28 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
1330 return stats.packets; 1361 return stats.packets;
1331} 1362}
1332 1363
1333static void free_old_xmit_skbs(struct send_queue *sq) 1364static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
1334{ 1365{
1335 struct sk_buff *skb;
1336 unsigned int len; 1366 unsigned int len;
1337 unsigned int packets = 0; 1367 unsigned int packets = 0;
1338 unsigned int bytes = 0; 1368 unsigned int bytes = 0;
1369 void *ptr;
1339 1370
1340 while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { 1371 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1341 pr_debug("Sent skb %p\n", skb); 1372 if (likely(!is_xdp_frame(ptr))) {
1373 struct sk_buff *skb = ptr;
1342 1374
1343 bytes += skb->len; 1375 pr_debug("Sent skb %p\n", skb);
1344 packets++; 1376
1377 bytes += skb->len;
1378 napi_consume_skb(skb, in_napi);
1379 } else {
1380 struct xdp_frame *frame = ptr_to_xdp(ptr);
1345 1381
1346 dev_consume_skb_any(skb); 1382 bytes += frame->len;
1383 xdp_return_frame(frame);
1384 }
1385 packets++;
1347 } 1386 }
1348 1387
1349 /* Avoid overhead when no packets have been processed 1388 /* Avoid overhead when no packets have been processed
@@ -1358,6 +1397,16 @@ static void free_old_xmit_skbs(struct send_queue *sq)
1358 u64_stats_update_end(&sq->stats.syncp); 1397 u64_stats_update_end(&sq->stats.syncp);
1359} 1398}
1360 1399
1400static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1401{
1402 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1403 return false;
1404 else if (q < vi->curr_queue_pairs)
1405 return true;
1406 else
1407 return false;
1408}
1409
1361static void virtnet_poll_cleantx(struct receive_queue *rq) 1410static void virtnet_poll_cleantx(struct receive_queue *rq)
1362{ 1411{
1363 struct virtnet_info *vi = rq->vq->vdev->priv; 1412 struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1365,11 +1414,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
1365 struct send_queue *sq = &vi->sq[index]; 1414 struct send_queue *sq = &vi->sq[index];
1366 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 1415 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1367 1416
1368 if (!sq->napi.weight) 1417 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
1369 return; 1418 return;
1370 1419
1371 if (__netif_tx_trylock(txq)) { 1420 if (__netif_tx_trylock(txq)) {
1372 free_old_xmit_skbs(sq); 1421 free_old_xmit_skbs(sq, true);
1373 __netif_tx_unlock(txq); 1422 __netif_tx_unlock(txq);
1374 } 1423 }
1375 1424
@@ -1442,10 +1491,18 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
1442{ 1491{
1443 struct send_queue *sq = container_of(napi, struct send_queue, napi); 1492 struct send_queue *sq = container_of(napi, struct send_queue, napi);
1444 struct virtnet_info *vi = sq->vq->vdev->priv; 1493 struct virtnet_info *vi = sq->vq->vdev->priv;
1445 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); 1494 unsigned int index = vq2txq(sq->vq);
1495 struct netdev_queue *txq;
1446 1496
1497 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
1498 /* We don't need to enable cb for XDP */
1499 napi_complete_done(napi, 0);
1500 return 0;
1501 }
1502
1503 txq = netdev_get_tx_queue(vi->dev, index);
1447 __netif_tx_lock(txq, raw_smp_processor_id()); 1504 __netif_tx_lock(txq, raw_smp_processor_id());
1448 free_old_xmit_skbs(sq); 1505 free_old_xmit_skbs(sq, true);
1449 __netif_tx_unlock(txq); 1506 __netif_tx_unlock(txq);
1450 1507
1451 virtqueue_napi_complete(napi, sq->vq, 0); 1508 virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1514,7 +1571,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1514 bool use_napi = sq->napi.weight; 1571 bool use_napi = sq->napi.weight;
1515 1572
1516 /* Free up any pending old buffers before queueing new ones. */ 1573 /* Free up any pending old buffers before queueing new ones. */
1517 free_old_xmit_skbs(sq); 1574 free_old_xmit_skbs(sq, false);
1518 1575
1519 if (use_napi && kick) 1576 if (use_napi && kick)
1520 virtqueue_enable_cb_delayed(sq->vq); 1577 virtqueue_enable_cb_delayed(sq->vq);
@@ -1557,7 +1614,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1557 if (!use_napi && 1614 if (!use_napi &&
1558 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 1615 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1559 /* More just got used, free them then recheck. */ 1616 /* More just got used, free them then recheck. */
1560 free_old_xmit_skbs(sq); 1617 free_old_xmit_skbs(sq, false);
1561 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { 1618 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1562 netif_start_subqueue(dev, qnum); 1619 netif_start_subqueue(dev, qnum);
1563 virtqueue_disable_cb(sq->vq); 1620 virtqueue_disable_cb(sq->vq);
@@ -2395,6 +2452,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2395 return -ENOMEM; 2452 return -ENOMEM;
2396 } 2453 }
2397 2454
2455 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
2456 if (!prog && !old_prog)
2457 return 0;
2458
2398 if (prog) { 2459 if (prog) {
2399 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); 2460 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
2400 if (IS_ERR(prog)) 2461 if (IS_ERR(prog))
@@ -2402,36 +2463,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2402 } 2463 }
2403 2464
2404 /* Make sure NAPI is not using any XDP TX queues for RX. */ 2465 /* Make sure NAPI is not using any XDP TX queues for RX. */
2405 if (netif_running(dev)) 2466 if (netif_running(dev)) {
2406 for (i = 0; i < vi->max_queue_pairs; i++) 2467 for (i = 0; i < vi->max_queue_pairs; i++) {
2407 napi_disable(&vi->rq[i].napi); 2468 napi_disable(&vi->rq[i].napi);
2469 virtnet_napi_tx_disable(&vi->sq[i].napi);
2470 }
2471 }
2472
2473 if (!prog) {
2474 for (i = 0; i < vi->max_queue_pairs; i++) {
2475 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2476 if (i == 0)
2477 virtnet_restore_guest_offloads(vi);
2478 }
2479 synchronize_net();
2480 }
2408 2481
2409 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2410 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 2482 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2411 if (err) 2483 if (err)
2412 goto err; 2484 goto err;
2485 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2413 vi->xdp_queue_pairs = xdp_qp; 2486 vi->xdp_queue_pairs = xdp_qp;
2414 2487
2415 for (i = 0; i < vi->max_queue_pairs; i++) { 2488 if (prog) {
2416 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 2489 for (i = 0; i < vi->max_queue_pairs; i++) {
2417 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 2490 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2418 if (i == 0) { 2491 if (i == 0 && !old_prog)
2419 if (!old_prog)
2420 virtnet_clear_guest_offloads(vi); 2492 virtnet_clear_guest_offloads(vi);
2421 if (!prog)
2422 virtnet_restore_guest_offloads(vi);
2423 } 2493 }
2494 }
2495
2496 for (i = 0; i < vi->max_queue_pairs; i++) {
2424 if (old_prog) 2497 if (old_prog)
2425 bpf_prog_put(old_prog); 2498 bpf_prog_put(old_prog);
2426 if (netif_running(dev)) 2499 if (netif_running(dev)) {
2427 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2500 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2501 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2502 &vi->sq[i].napi);
2503 }
2428 } 2504 }
2429 2505
2430 return 0; 2506 return 0;
2431 2507
2432err: 2508err:
2433 for (i = 0; i < vi->max_queue_pairs; i++) 2509 if (!prog) {
2434 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2510 virtnet_clear_guest_offloads(vi);
2511 for (i = 0; i < vi->max_queue_pairs; i++)
2512 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
2513 }
2514
2515 if (netif_running(dev)) {
2516 for (i = 0; i < vi->max_queue_pairs; i++) {
2517 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2518 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2519 &vi->sq[i].napi);
2520 }
2521 }
2435 if (prog) 2522 if (prog)
2436 bpf_prog_sub(prog, vi->max_queue_pairs - 1); 2523 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2437 return err; 2524 return err;
@@ -2613,16 +2700,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
2613 put_page(vi->rq[i].alloc_frag.page); 2700 put_page(vi->rq[i].alloc_frag.page);
2614} 2701}
2615 2702
2616static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
2617{
2618 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
2619 return false;
2620 else if (q < vi->curr_queue_pairs)
2621 return true;
2622 else
2623 return false;
2624}
2625
2626static void free_unused_bufs(struct virtnet_info *vi) 2703static void free_unused_bufs(struct virtnet_info *vi)
2627{ 2704{
2628 void *buf; 2705 void *buf;
@@ -2631,10 +2708,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
2631 for (i = 0; i < vi->max_queue_pairs; i++) { 2708 for (i = 0; i < vi->max_queue_pairs; i++) {
2632 struct virtqueue *vq = vi->sq[i].vq; 2709 struct virtqueue *vq = vi->sq[i].vq;
2633 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 2710 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2634 if (!is_xdp_raw_buffer_queue(vi, i)) 2711 if (!is_xdp_frame(buf))
2635 dev_kfree_skb(buf); 2712 dev_kfree_skb(buf);
2636 else 2713 else
2637 put_page(virt_to_head_page(buf)); 2714 xdp_return_frame(ptr_to_xdp(buf));
2638 } 2715 }
2639 } 2716 }
2640 2717
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e454dfc9ad8f..89984fcab01e 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -535,8 +535,8 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
535 } 535 }
536 536
537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); 537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
538 tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, 538 tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
539 &tq->buf_info_pa, GFP_KERNEL); 539 &tq->buf_info_pa, GFP_KERNEL);
540 if (!tq->buf_info) 540 if (!tq->buf_info)
541 goto err; 541 goto err;
542 542
@@ -1815,8 +1815,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1815 1815
1816 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1816 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1817 rq->rx_ring[1].size); 1817 rq->rx_ring[1].size);
1818 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1818 bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1819 GFP_KERNEL); 1819 GFP_KERNEL);
1820 if (!bi) 1820 if (!bi)
1821 goto err; 1821 goto err;
1822 1822
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 95909e262ba4..7c1430ed0244 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev)
1273 1273
1274 /* default to no qdisc; user can add if desired */ 1274 /* default to no qdisc; user can add if desired */
1275 dev->priv_flags |= IFF_NO_QUEUE; 1275 dev->priv_flags |= IFF_NO_QUEUE;
1276
1277 dev->min_mtu = 0;
1278 dev->max_mtu = 0;
1276} 1279}
1277 1280
1278static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], 1281static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5209ee9aac47..2aae11feff0c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2219,7 +2219,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2219 struct pcpu_sw_netstats *tx_stats, *rx_stats; 2219 struct pcpu_sw_netstats *tx_stats, *rx_stats;
2220 union vxlan_addr loopback; 2220 union vxlan_addr loopback;
2221 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 2221 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
2222 struct net_device *dev = skb->dev; 2222 struct net_device *dev;
2223 int len = skb->len; 2223 int len = skb->len;
2224 2224
2225 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 2225 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
@@ -2239,9 +2239,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2239#endif 2239#endif
2240 } 2240 }
2241 2241
2242 rcu_read_lock();
2243 dev = skb->dev;
2244 if (unlikely(!(dev->flags & IFF_UP))) {
2245 kfree_skb(skb);
2246 goto drop;
2247 }
2248
2242 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) 2249 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
2243 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, 2250 vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
2244 vni);
2245 2251
2246 u64_stats_update_begin(&tx_stats->syncp); 2252 u64_stats_update_begin(&tx_stats->syncp);
2247 tx_stats->tx_packets++; 2253 tx_stats->tx_packets++;
@@ -2254,8 +2260,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2254 rx_stats->rx_bytes += len; 2260 rx_stats->rx_bytes += len;
2255 u64_stats_update_end(&rx_stats->syncp); 2261 u64_stats_update_end(&rx_stats->syncp);
2256 } else { 2262 } else {
2263drop:
2257 dev->stats.rx_dropped++; 2264 dev->stats.rx_dropped++;
2258 } 2265 }
2266 rcu_read_unlock();
2259} 2267}
2260 2268
2261static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, 2269static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index c0b0f525c87c..27decf8ae840 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1575,7 +1575,7 @@ try:
1575 dev->stats.tx_packets++; 1575 dev->stats.tx_packets++;
1576 dev->stats.tx_bytes += skb->len; 1576 dev->stats.tx_bytes += skb->len;
1577 } 1577 }
1578 dev_kfree_skb_irq(skb); 1578 dev_consume_skb_irq(skb);
1579 dpriv->tx_skbuff[cur] = NULL; 1579 dpriv->tx_skbuff[cur] = NULL;
1580 ++dpriv->tx_dirty; 1580 ++dpriv->tx_dirty;
1581 } else { 1581 } else {
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 839fa7715709..a08f04c3f644 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -279,10 +279,9 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
279 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); 279 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
280 280
281 /* Get BD buffer */ 281 /* Get BD buffer */
282 bd_buffer = dma_zalloc_coherent(priv->dev, 282 bd_buffer = dma_alloc_coherent(priv->dev,
283 (RX_BD_RING_LEN + TX_BD_RING_LEN) * 283 (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
284 MAX_RX_BUF_LENGTH, 284 &bd_dma_addr, GFP_KERNEL);
285 &bd_dma_addr, GFP_KERNEL);
286 285
287 if (!bd_buffer) { 286 if (!bd_buffer) {
288 dev_err(priv->dev, "Could not allocate buffer descriptors\n"); 287 dev_err(priv->dev, "Could not allocate buffer descriptors\n");
@@ -483,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
483 memset(priv->tx_buffer + 482 memset(priv->tx_buffer +
484 (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 483 (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
485 0, skb->len); 484 0, skb->len);
486 dev_kfree_skb_irq(skb); 485 dev_consume_skb_irq(skb);
487 486
488 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 487 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
489 priv->skb_dirtytx = 488 priv->skb_dirtytx =
@@ -1057,6 +1056,54 @@ static const struct net_device_ops uhdlc_ops = {
1057 .ndo_tx_timeout = uhdlc_tx_timeout, 1056 .ndo_tx_timeout = uhdlc_tx_timeout,
1058}; 1057};
1059 1058
1059static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
1060{
1061 struct device_node *np;
1062 struct platform_device *pdev;
1063 struct resource *res;
1064 static int siram_init_flag;
1065 int ret = 0;
1066
1067 np = of_find_compatible_node(NULL, NULL, name);
1068 if (!np)
1069 return -EINVAL;
1070
1071 pdev = of_find_device_by_node(np);
1072 if (!pdev) {
1073 pr_err("%pOFn: failed to lookup pdev\n", np);
1074 of_node_put(np);
1075 return -EINVAL;
1076 }
1077
1078 of_node_put(np);
1079 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1080 if (!res) {
1081 ret = -EINVAL;
1082 goto error_put_device;
1083 }
1084 *ptr = ioremap(res->start, resource_size(res));
1085 if (!*ptr) {
1086 ret = -ENOMEM;
1087 goto error_put_device;
1088 }
1089
1090 /* We've remapped the addresses, and we don't need the device any
1091 * more, so we should release it.
1092 */
1093 put_device(&pdev->dev);
1094
1095 if (init_flag && siram_init_flag == 0) {
1096 memset_io(*ptr, 0, resource_size(res));
1097 siram_init_flag = 1;
1098 }
1099 return 0;
1100
1101error_put_device:
1102 put_device(&pdev->dev);
1103
1104 return ret;
1105}
1106
1060static int ucc_hdlc_probe(struct platform_device *pdev) 1107static int ucc_hdlc_probe(struct platform_device *pdev)
1061{ 1108{
1062 struct device_node *np = pdev->dev.of_node; 1109 struct device_node *np = pdev->dev.of_node;
@@ -1151,6 +1198,15 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
1151 ret = ucc_of_parse_tdm(np, utdm, ut_info); 1198 ret = ucc_of_parse_tdm(np, utdm, ut_info);
1152 if (ret) 1199 if (ret)
1153 goto free_utdm; 1200 goto free_utdm;
1201
1202 ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
1203 (void __iomem **)&utdm->si_regs);
1204 if (ret)
1205 goto free_utdm;
1206 ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
1207 (void __iomem **)&utdm->siram);
1208 if (ret)
1209 goto unmap_si_regs;
1154 } 1210 }
1155 1211
1156 if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) 1212 if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
@@ -1159,7 +1215,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
1159 ret = uhdlc_init(uhdlc_priv); 1215 ret = uhdlc_init(uhdlc_priv);
1160 if (ret) { 1216 if (ret) {
1161 dev_err(&pdev->dev, "Failed to init uhdlc\n"); 1217 dev_err(&pdev->dev, "Failed to init uhdlc\n");
1162 goto free_utdm; 1218 goto undo_uhdlc_init;
1163 } 1219 }
1164 1220
1165 dev = alloc_hdlcdev(uhdlc_priv); 1221 dev = alloc_hdlcdev(uhdlc_priv);
@@ -1188,6 +1244,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
1188free_dev: 1244free_dev:
1189 free_netdev(dev); 1245 free_netdev(dev);
1190undo_uhdlc_init: 1246undo_uhdlc_init:
1247 iounmap(utdm->siram);
1248unmap_si_regs:
1249 iounmap(utdm->si_regs);
1191free_utdm: 1250free_utdm:
1192 if (uhdlc_priv->tsa) 1251 if (uhdlc_priv->tsa)
1193 kfree(utdm); 1252 kfree(utdm);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f6d3ecbdd3a3..2a5668b4f6bc 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1553,10 +1553,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
1553 * coherent DMA are unsupported 1553 * coherent DMA are unsupported
1554 */ 1554 */
1555 dest_ring->base_addr_owner_space_unaligned = 1555 dest_ring->base_addr_owner_space_unaligned =
1556 dma_zalloc_coherent(ar->dev, 1556 dma_alloc_coherent(ar->dev,
1557 (nentries * sizeof(struct ce_desc) + 1557 (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN),
1558 CE_DESC_RING_ALIGN), 1558 &base_addr, GFP_KERNEL);
1559 &base_addr, GFP_KERNEL);
1560 if (!dest_ring->base_addr_owner_space_unaligned) { 1559 if (!dest_ring->base_addr_owner_space_unaligned) {
1561 kfree(dest_ring); 1560 kfree(dest_ring);
1562 return ERR_PTR(-ENOMEM); 1561 return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 399b501f3c3c..e8891f5fc83a 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
548 { 548 {
549 .id = WCN3990_HW_1_0_DEV_VERSION, 549 .id = WCN3990_HW_1_0_DEV_VERSION,
550 .dev_id = 0, 550 .dev_id = 0,
551 .bus = ATH10K_BUS_PCI, 551 .bus = ATH10K_BUS_SNOC,
552 .name = "wcn3990 hw1.0", 552 .name = "wcn3990 hw1.0",
553 .continuous_frag_desc = true, 553 .continuous_frag_desc = true,
554 .tx_chain_mask = 0x7, 554 .tx_chain_mask = 0x7,
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e49b36752ba2..49758490eaba 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5169,10 +5169,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
5169 if (vif->type == NL80211_IFTYPE_ADHOC || 5169 if (vif->type == NL80211_IFTYPE_ADHOC ||
5170 vif->type == NL80211_IFTYPE_MESH_POINT || 5170 vif->type == NL80211_IFTYPE_MESH_POINT ||
5171 vif->type == NL80211_IFTYPE_AP) { 5171 vif->type == NL80211_IFTYPE_AP) {
5172 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 5172 arvif->beacon_buf = dma_alloc_coherent(ar->dev,
5173 IEEE80211_MAX_FRAME_LEN, 5173 IEEE80211_MAX_FRAME_LEN,
5174 &arvif->beacon_paddr, 5174 &arvif->beacon_paddr,
5175 GFP_ATOMIC); 5175 GFP_ATOMIC);
5176 if (!arvif->beacon_buf) { 5176 if (!arvif->beacon_buf) {
5177 ret = -ENOMEM; 5177 ret = -ENOMEM;
5178 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5178 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 01b4edb00e9e..39e0b1cc2a12 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -936,8 +936,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
936 */ 936 */
937 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 937 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
938 938
939 data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev, 939 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes,
940 alloc_nbytes,
941 &ce_data_base, 940 &ce_data_base,
942 GFP_ATOMIC); 941 GFP_ATOMIC);
943 942
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ba837403e266..8e236d158ca6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -5193,7 +5193,7 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5193 void *vaddr; 5193 void *vaddr;
5194 5194
5195 pool_size = num_units * round_up(unit_len, 4); 5195 pool_size = num_units * round_up(unit_len, 4);
5196 vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); 5196 vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5197 5197
5198 if (!vaddr) 5198 if (!vaddr)
5199 return -ENOMEM; 5199 return -ENOMEM;
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 5ab3e31c9ffa..bab30f7a443c 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -174,9 +174,8 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
174 int i; 174 int i;
175 175
176 size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); 176 size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
177 wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size, 177 wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
178 &wcn_ch->dma_addr, 178 GFP_KERNEL);
179 GFP_KERNEL);
180 if (!wcn_ch->cpu_addr) 179 if (!wcn_ch->cpu_addr)
181 return -ENOMEM; 180 return -ENOMEM;
182 181
@@ -627,9 +626,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
627 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 626 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
628 627
629 s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H; 628 s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
630 cpu_addr = dma_zalloc_coherent(wcn->dev, s, 629 cpu_addr = dma_alloc_coherent(wcn->dev, s,
631 &wcn->mgmt_mem_pool.phy_addr, 630 &wcn->mgmt_mem_pool.phy_addr,
632 GFP_KERNEL); 631 GFP_KERNEL);
633 if (!cpu_addr) 632 if (!cpu_addr)
634 goto out_err; 633 goto out_err;
635 634
@@ -642,9 +641,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
642 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 641 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
643 642
644 s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L; 643 s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
645 cpu_addr = dma_zalloc_coherent(wcn->dev, s, 644 cpu_addr = dma_alloc_coherent(wcn->dev, s,
646 &wcn->data_mem_pool.phy_addr, 645 &wcn->data_mem_pool.phy_addr,
647 GFP_KERNEL); 646 GFP_KERNEL);
648 if (!cpu_addr) 647 if (!cpu_addr)
649 goto out_err; 648 goto out_err;
650 649
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 05a8348bd7b9..3380aaef456c 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -99,7 +99,7 @@ static int wil_sring_alloc(struct wil6210_priv *wil,
99 /* Status messages are allocated and initialized to 0. This is necessary 99 /* Status messages are allocated and initialized to 0. This is necessary
100 * since DR bit should be initialized to 0. 100 * since DR bit should be initialized to 0.
101 */ 101 */
102 sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); 102 sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
103 if (!sring->va) 103 if (!sring->va)
104 return -ENOMEM; 104 return -ENOMEM;
105 105
@@ -381,15 +381,15 @@ static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
381 if (!ring->ctx) 381 if (!ring->ctx)
382 goto err; 382 goto err;
383 383
384 ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); 384 ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
385 if (!ring->va) 385 if (!ring->va)
386 goto err_free_ctx; 386 goto err_free_ctx;
387 387
388 if (ring->is_rx) { 388 if (ring->is_rx) {
389 sz = sizeof(*ring->edma_rx_swtail.va); 389 sz = sizeof(*ring->edma_rx_swtail.va);
390 ring->edma_rx_swtail.va = 390 ring->edma_rx_swtail.va =
391 dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, 391 dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
392 GFP_KERNEL); 392 GFP_KERNEL);
393 if (!ring->edma_rx_swtail.va) 393 if (!ring->edma_rx_swtail.va)
394 goto err_free_va; 394 goto err_free_va;
395 } 395 }
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index dfc4c34298d4..b34e51933257 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? 431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; 432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
433 433
434 ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 434 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
435 ring_mem_size, &(ring->dmabase), 435 ring_mem_size, &(ring->dmabase),
436 GFP_KERNEL); 436 GFP_KERNEL);
437 if (!ring->descbase) 437 if (!ring->descbase)
438 return -ENOMEM; 438 return -ENOMEM;
439 439
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index 1b1da7d83652..2ce1537d983c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -331,9 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
331static int alloc_ringmemory(struct b43legacy_dmaring *ring) 331static int alloc_ringmemory(struct b43legacy_dmaring *ring)
332{ 332{
333 /* GFP flags must match the flags in free_ringmemory()! */ 333 /* GFP flags must match the flags in free_ringmemory()! */
334 ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 334 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
335 B43legacy_DMA_RINGMEMSIZE, 335 B43legacy_DMA_RINGMEMSIZE,
336 &(ring->dmabase), GFP_KERNEL); 336 &(ring->dmabase), GFP_KERNEL);
337 if (!ring->descbase) 337 if (!ring->descbase)
338 return -ENOMEM; 338 return -ENOMEM;
339 339
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 16d7dda965d8..0f69b3fa296e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1281,10 +1281,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1281 u32 addr; 1281 u32 addr;
1282 1282
1283 devinfo->shared.scratch = 1283 devinfo->shared.scratch =
1284 dma_zalloc_coherent(&devinfo->pdev->dev, 1284 dma_alloc_coherent(&devinfo->pdev->dev,
1285 BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1285 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1286 &devinfo->shared.scratch_dmahandle, 1286 &devinfo->shared.scratch_dmahandle,
1287 GFP_KERNEL); 1287 GFP_KERNEL);
1288 if (!devinfo->shared.scratch) 1288 if (!devinfo->shared.scratch)
1289 goto fail; 1289 goto fail;
1290 1290
@@ -1298,10 +1298,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1298 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); 1298 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1299 1299
1300 devinfo->shared.ringupd = 1300 devinfo->shared.ringupd =
1301 dma_zalloc_coherent(&devinfo->pdev->dev, 1301 dma_alloc_coherent(&devinfo->pdev->dev,
1302 BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1302 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1303 &devinfo->shared.ringupd_dmahandle, 1303 &devinfo->shared.ringupd_dmahandle,
1304 GFP_KERNEL); 1304 GFP_KERNEL);
1305 if (!devinfo->shared.ringupd) 1305 if (!devinfo->shared.ringupd)
1306 goto fail; 1306 goto fail;
1307 1307
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 491ca3c8b43c..83d5bceea08f 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -1,6 +1,6 @@
1config IWLWIFI 1config IWLWIFI
2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
3 depends on PCI && HAS_IOMEM 3 depends on PCI && HAS_IOMEM && CFG80211
4 select FW_LOADER 4 select FW_LOADER
5 ---help--- 5 ---help---
6 Select to build the driver supporting the: 6 Select to build the driver supporting the:
@@ -47,6 +47,7 @@ if IWLWIFI
47config IWLWIFI_LEDS 47config IWLWIFI_LEDS
48 bool 48 bool
49 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI 49 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
50 depends on IWLMVM || IWLDVM
50 select LEDS_TRIGGERS 51 select LEDS_TRIGGERS
51 select MAC80211_LEDS 52 select MAC80211_LEDS
52 default y 53 default y
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index e965cc588850..9e850c25877b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -711,30 +711,24 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
711 * Allocate the circular buffer of Read Buffer Descriptors 711 * Allocate the circular buffer of Read Buffer Descriptors
712 * (RBDs) 712 * (RBDs)
713 */ 713 */
714 rxq->bd = dma_zalloc_coherent(dev, 714 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
715 free_size * rxq->queue_size, 715 &rxq->bd_dma, GFP_KERNEL);
716 &rxq->bd_dma, GFP_KERNEL);
717 if (!rxq->bd) 716 if (!rxq->bd)
718 goto err; 717 goto err;
719 718
720 if (trans->cfg->mq_rx_supported) { 719 if (trans->cfg->mq_rx_supported) {
721 rxq->used_bd = dma_zalloc_coherent(dev, 720 rxq->used_bd = dma_alloc_coherent(dev,
722 (use_rx_td ? 721 (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
723 sizeof(*rxq->cd) : 722 &rxq->used_bd_dma,
724 sizeof(__le32)) * 723 GFP_KERNEL);
725 rxq->queue_size,
726 &rxq->used_bd_dma,
727 GFP_KERNEL);
728 if (!rxq->used_bd) 724 if (!rxq->used_bd)
729 goto err; 725 goto err;
730 } 726 }
731 727
732 /* Allocate the driver's pointer to receive buffer status */ 728 /* Allocate the driver's pointer to receive buffer status */
733 rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? 729 rxq->rb_stts = dma_alloc_coherent(dev,
734 sizeof(__le16) : 730 use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
735 sizeof(struct iwl_rb_status), 731 &rxq->rb_stts_dma, GFP_KERNEL);
736 &rxq->rb_stts_dma,
737 GFP_KERNEL);
738 if (!rxq->rb_stts) 732 if (!rxq->rb_stts)
739 goto err; 733 goto err;
740 734
@@ -742,16 +736,14 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
742 return 0; 736 return 0;
743 737
744 /* Allocate the driver's pointer to TR tail */ 738 /* Allocate the driver's pointer to TR tail */
745 rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 739 rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
746 &rxq->tr_tail_dma, 740 &rxq->tr_tail_dma, GFP_KERNEL);
747 GFP_KERNEL);
748 if (!rxq->tr_tail) 741 if (!rxq->tr_tail)
749 goto err; 742 goto err;
750 743
751 /* Allocate the driver's pointer to CR tail */ 744 /* Allocate the driver's pointer to CR tail */
752 rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 745 rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
753 &rxq->cr_tail_dma, 746 &rxq->cr_tail_dma, GFP_KERNEL);
754 GFP_KERNEL);
755 if (!rxq->cr_tail) 747 if (!rxq->cr_tail)
756 goto err; 748 goto err;
757 /* 749 /*
@@ -1947,9 +1939,8 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1947 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1939 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1948 1940
1949 trans_pcie->ict_tbl = 1941 trans_pcie->ict_tbl =
1950 dma_zalloc_coherent(trans->dev, ICT_SIZE, 1942 dma_alloc_coherent(trans->dev, ICT_SIZE,
1951 &trans_pcie->ict_tbl_dma, 1943 &trans_pcie->ict_tbl_dma, GFP_KERNEL);
1952 GFP_KERNEL);
1953 if (!trans_pcie->ict_tbl) 1944 if (!trans_pcie->ict_tbl)
1954 return -ENOMEM; 1945 return -ENOMEM;
1955 1946
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3a4b8786f7ea..6359053bd0c7 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2761,6 +2761,11 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2761 BIT(NL80211_CHAN_WIDTH_160); 2761 BIT(NL80211_CHAN_WIDTH_160);
2762 } 2762 }
2763 2763
2764 if (!n_limits) {
2765 err = -EINVAL;
2766 goto failed_hw;
2767 }
2768
2764 data->if_combination.n_limits = n_limits; 2769 data->if_combination.n_limits = n_limits;
2765 data->if_combination.max_interfaces = 2048; 2770 data->if_combination.max_interfaces = 2048;
2766 data->if_combination.limits = data->if_limits; 2771 data->if_combination.limits = data->if_limits;
@@ -3549,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
3549 goto out_err; 3554 goto out_err;
3550 } 3555 }
3551 3556
3552 genlmsg_reply(skb, info); 3557 res = genlmsg_reply(skb, info);
3553 break; 3558 break;
3554 } 3559 }
3555 3560
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 497e762978cc..b2cabce1d74d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
212 mt76x02_add_rate_power_offset(t, delta); 212 mt76x02_add_rate_power_offset(t, delta);
213} 213}
214 214
215void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) 215void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp)
216{ 216{
217 struct mt76x0_chan_map { 217 struct mt76x0_chan_map {
218 u8 chan; 218 u8 chan;
219 u8 offset; 219 u8 offset;
220 } chan_map[] = { 220 } chan_map[] = {
221 { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, 221 { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 },
222 { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, 222 { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 },
223 { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, 223 { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 },
224 { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, 224 { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 },
225 { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, 225 { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 },
226 { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, 226 { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 },
227 { 167, 17 }, { 171, 18 }, { 173, 19 }, 227 { 167, 34 }, { 171, 36 }, { 175, 38 },
228 }; 228 };
229 struct ieee80211_channel *chan = dev->mt76.chandef.chan; 229 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
230 u8 offset, addr; 230 u8 offset, addr;
231 int i, idx = 0;
231 u16 data; 232 u16 data;
232 int i;
233 233
234 if (mt76x0_tssi_enabled(dev)) { 234 if (mt76x0_tssi_enabled(dev)) {
235 s8 target_power; 235 s8 target_power;
@@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
239 else 239 else
240 data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); 240 data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
241 target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; 241 target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
242 info[0] = target_power + mt76x0_get_delta(dev); 242 *tp = target_power + mt76x0_get_delta(dev);
243 info[1] = 0;
244 243
245 return; 244 return;
246 } 245 }
247 246
248 for (i = 0; i < ARRAY_SIZE(chan_map); i++) { 247 for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
249 if (chan_map[i].chan <= chan->hw_value) { 248 if (chan->hw_value <= chan_map[i].chan) {
249 idx = (chan->hw_value == chan_map[i].chan);
250 offset = chan_map[i].offset; 250 offset = chan_map[i].offset;
251 break; 251 break;
252 } 252 }
@@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
258 addr = MT_EE_TX_POWER_DELTA_BW80 + offset; 258 addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
259 } else { 259 } else {
260 switch (chan->hw_value) { 260 switch (chan->hw_value) {
261 case 42:
262 offset = 2;
263 break;
261 case 58: 264 case 58:
262 offset = 8; 265 offset = 8;
263 break; 266 break;
264 case 106: 267 case 106:
265 offset = 14; 268 offset = 14;
266 break; 269 break;
267 case 112: 270 case 122:
268 offset = 20; 271 offset = 20;
269 break; 272 break;
270 case 155: 273 case 155:
@@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
277 } 280 }
278 281
279 data = mt76x02_eeprom_get(dev, addr); 282 data = mt76x02_eeprom_get(dev, addr);
280 283 *tp = data >> (8 * idx);
281 info[0] = data; 284 if (*tp < 0 || *tp > 0x3f)
282 if (!info[0] || info[0] > 0x3f) 285 *tp = 5;
283 info[0] = 5;
284
285 info[1] = data >> 8;
286 if (!info[1] || info[1] > 0x3f)
287 info[1] = 5;
288} 286}
289 287
290static int mt76x0_check_eeprom(struct mt76x02_dev *dev) 288static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
index ee9ade9f3c8b..42b259f90b6d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -26,7 +26,7 @@ struct mt76x02_dev;
26int mt76x0_eeprom_init(struct mt76x02_dev *dev); 26int mt76x0_eeprom_init(struct mt76x02_dev *dev);
27void mt76x0_read_rx_gain(struct mt76x02_dev *dev); 27void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
28void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); 28void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
29void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); 29void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp);
30 30
31static inline s8 s6_to_s8(u32 val) 31static inline s8 s6_to_s8(u32 val)
32{ 32{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 1eb1a802ed20..b6166703ad76 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
845void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) 845void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
846{ 846{
847 struct mt76_rate_power *t = &dev->mt76.rate_power; 847 struct mt76_rate_power *t = &dev->mt76.rate_power;
848 u8 info[2]; 848 s8 info;
849 849
850 mt76x0_get_tx_power_per_rate(dev); 850 mt76x0_get_tx_power_per_rate(dev);
851 mt76x0_get_power_info(dev, info); 851 mt76x0_get_power_info(dev, &info);
852 852
853 mt76x02_add_rate_power_offset(t, info[0]); 853 mt76x02_add_rate_power_offset(t, info);
854 mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); 854 mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
855 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); 855 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
856 mt76x02_add_rate_power_offset(t, -info[0]); 856 mt76x02_add_rate_power_offset(t, -info);
857 857
858 mt76x02_phy_set_txpower(dev, info[0], info[1]); 858 mt76x02_phy_set_txpower(dev, info, info);
859} 859}
860 860
861void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) 861void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0e6b43bb4678..a5ea3ba495a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = {
158 .get_txpower = mt76x02_get_txpower, 158 .get_txpower = mt76x02_get_txpower,
159}; 159};
160 160
161static int mt76x0u_register_device(struct mt76x02_dev *dev) 161static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
162{ 162{
163 struct ieee80211_hw *hw = dev->mt76.hw;
164 int err; 163 int err;
165 164
166 err = mt76u_alloc_queues(&dev->mt76);
167 if (err < 0)
168 goto out_err;
169
170 err = mt76u_mcu_init_rx(&dev->mt76);
171 if (err < 0)
172 goto out_err;
173
174 mt76x0_chip_onoff(dev, true, true); 165 mt76x0_chip_onoff(dev, true, true);
175 if (!mt76x02_wait_for_mac(&dev->mt76)) { 166
176 err = -ETIMEDOUT; 167 if (!mt76x02_wait_for_mac(&dev->mt76))
177 goto out_err; 168 return -ETIMEDOUT;
178 }
179 169
180 err = mt76x0u_mcu_init(dev); 170 err = mt76x0u_mcu_init(dev);
181 if (err < 0) 171 if (err < 0)
182 goto out_err; 172 return err;
183 173
184 mt76x0_init_usb_dma(dev); 174 mt76x0_init_usb_dma(dev);
185 err = mt76x0_init_hardware(dev); 175 err = mt76x0_init_hardware(dev);
186 if (err < 0) 176 if (err < 0)
187 goto out_err; 177 return err;
188 178
189 mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); 179 mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
190 mt76_wr(dev, MT_TXOP_CTRL_CFG, 180 mt76_wr(dev, MT_TXOP_CTRL_CFG,
191 FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | 181 FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
192 FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); 182 FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
193 183
184 return 0;
185}
186
187static int mt76x0u_register_device(struct mt76x02_dev *dev)
188{
189 struct ieee80211_hw *hw = dev->mt76.hw;
190 int err;
191
192 err = mt76u_alloc_queues(&dev->mt76);
193 if (err < 0)
194 goto out_err;
195
196 err = mt76u_mcu_init_rx(&dev->mt76);
197 if (err < 0)
198 goto out_err;
199
200 err = mt76x0u_init_hardware(dev);
201 if (err < 0)
202 goto out_err;
203
194 err = mt76x0_register_device(dev); 204 err = mt76x0_register_device(dev);
195 if (err < 0) 205 if (err < 0)
196 goto out_err; 206 goto out_err;
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
301 311
302 mt76u_stop_queues(&dev->mt76); 312 mt76u_stop_queues(&dev->mt76);
303 mt76x0u_mac_stop(dev); 313 mt76x0u_mac_stop(dev);
314 clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
315 mt76x0_chip_onoff(dev, false, false);
304 usb_kill_urb(usb->mcu.res.urb); 316 usb_kill_urb(usb->mcu.res.urb);
305 317
306 return 0; 318 return 0;
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
328 tasklet_enable(&usb->rx_tasklet); 340 tasklet_enable(&usb->rx_tasklet);
329 tasklet_enable(&usb->tx_tasklet); 341 tasklet_enable(&usb->tx_tasklet);
330 342
331 ret = mt76x0_init_hardware(dev); 343 ret = mt76x0u_init_hardware(dev);
332 if (ret) 344 if (ret)
333 goto err; 345 goto err;
334 346
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
index 528cb0401df1..4956a54151cb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
@@ -119,9 +119,9 @@ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
119 /* 119 /*
120 * Allocate DMA memory for descriptor and buffer. 120 * Allocate DMA memory for descriptor and buffer.
121 */ 121 */
122 addr = dma_zalloc_coherent(rt2x00dev->dev, 122 addr = dma_alloc_coherent(rt2x00dev->dev,
123 queue->limit * queue->desc_size, &dma, 123 queue->limit * queue->desc_size, &dma,
124 GFP_KERNEL); 124 GFP_KERNEL);
125 if (!addr) 125 if (!addr)
126 return -ENOMEM; 126 return -ENOMEM;
127 127
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index bd10165d7eec..4d4b07701149 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
164 } 164 }
165 165
166 sdio_claim_host(func); 166 sdio_claim_host(func);
167 /*
168 * To guarantee that the SDIO card is power cycled, as required to make
169 * the FW programming to succeed, let's do a brute force HW reset.
170 */
171 mmc_hw_reset(card->host);
172
167 sdio_enable_func(func); 173 sdio_enable_func(func);
168 sdio_release_host(func); 174 sdio_release_host(func);
169 175
@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
174{ 180{
175 struct sdio_func *func = dev_to_sdio_func(glue->dev); 181 struct sdio_func *func = dev_to_sdio_func(glue->dev);
176 struct mmc_card *card = func->card; 182 struct mmc_card *card = func->card;
177 int error;
178 183
179 sdio_claim_host(func); 184 sdio_claim_host(func);
180 sdio_disable_func(func); 185 sdio_disable_func(func);
181 sdio_release_host(func); 186 sdio_release_host(func);
182 187
183 /* Let runtime PM know the card is powered off */ 188 /* Let runtime PM know the card is powered off */
184 error = pm_runtime_put(&card->dev); 189 pm_runtime_put(&card->dev);
185 if (error < 0 && error != -EBUSY) {
186 dev_err(&card->dev, "%s failed: %i\n", __func__, error);
187
188 return error;
189 }
190
191 return 0; 190 return 0;
192} 191}
193 192
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 64b218699656..3a93e4d9828b 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -530,8 +530,10 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
530 SET_NETDEV_DEV(dev, &priv->lowerdev->dev); 530 SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
531 dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL); 531 dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
532 532
533 if (!dev->ieee80211_ptr) 533 if (!dev->ieee80211_ptr) {
534 err = -ENOMEM;
534 goto remove_handler; 535 goto remove_handler;
536 }
535 537
536 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; 538 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
537 dev->ieee80211_ptr->wiphy = common_wiphy; 539 dev->ieee80211_ptr->wiphy = common_wiphy;
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 5ee5f40b4dfc..f1eaa3c4d46a 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -1339,10 +1339,10 @@ static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1339 int rc; 1339 int rc;
1340 1340
1341 sndev->nr_rsvd_luts++; 1341 sndev->nr_rsvd_luts++;
1342 sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev, 1342 sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
1343 LUT_SIZE, 1343 LUT_SIZE,
1344 &sndev->self_shared_dma, 1344 &sndev->self_shared_dma,
1345 GFP_KERNEL); 1345 GFP_KERNEL);
1346 if (!sndev->self_shared) { 1346 if (!sndev->self_shared) {
1347 dev_err(&sndev->stdev->dev, 1347 dev_err(&sndev->stdev->dev,
1348 "unable to allocate memory for shared mw\n"); 1348 "unable to allocate memory for shared mw\n");
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 0cf58cabc9ed..3cf50274fadb 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -26,6 +26,12 @@ static int nvdimm_probe(struct device *dev)
26 struct nvdimm_drvdata *ndd; 26 struct nvdimm_drvdata *ndd;
27 int rc; 27 int rc;
28 28
29 rc = nvdimm_security_setup_events(dev);
30 if (rc < 0) {
31 dev_err(dev, "security event setup failed: %d\n", rc);
32 return rc;
33 }
34
29 rc = nvdimm_check_config_data(dev); 35 rc = nvdimm_check_config_data(dev);
30 if (rc) { 36 if (rc) {
31 /* not required for non-aliased nvdimm, ex. NVDIMM-N */ 37 /* not required for non-aliased nvdimm, ex. NVDIMM-N */
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 4890310df874..efe412a6b5b9 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -578,13 +578,25 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
578} 578}
579EXPORT_SYMBOL_GPL(__nvdimm_create); 579EXPORT_SYMBOL_GPL(__nvdimm_create);
580 580
581int nvdimm_security_setup_events(struct nvdimm *nvdimm) 581static void shutdown_security_notify(void *data)
582{ 582{
583 nvdimm->sec.overwrite_state = sysfs_get_dirent(nvdimm->dev.kobj.sd, 583 struct nvdimm *nvdimm = data;
584 "security"); 584
585 sysfs_put(nvdimm->sec.overwrite_state);
586}
587
588int nvdimm_security_setup_events(struct device *dev)
589{
590 struct nvdimm *nvdimm = to_nvdimm(dev);
591
592 if (nvdimm->sec.state < 0 || !nvdimm->sec.ops
593 || !nvdimm->sec.ops->overwrite)
594 return 0;
595 nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
585 if (!nvdimm->sec.overwrite_state) 596 if (!nvdimm->sec.overwrite_state)
586 return -ENODEV; 597 return -ENOMEM;
587 return 0; 598
599 return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
588} 600}
589EXPORT_SYMBOL_GPL(nvdimm_security_setup_events); 601EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);
590 602
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 2b2cf4e554d3..e5ffd5733540 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -54,12 +54,12 @@ struct nvdimm {
54}; 54};
55 55
56static inline enum nvdimm_security_state nvdimm_security_state( 56static inline enum nvdimm_security_state nvdimm_security_state(
57 struct nvdimm *nvdimm, bool master) 57 struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
58{ 58{
59 if (!nvdimm->sec.ops) 59 if (!nvdimm->sec.ops)
60 return -ENXIO; 60 return -ENXIO;
61 61
62 return nvdimm->sec.ops->state(nvdimm, master); 62 return nvdimm->sec.ops->state(nvdimm, ptype);
63} 63}
64int nvdimm_security_freeze(struct nvdimm *nvdimm); 64int nvdimm_security_freeze(struct nvdimm *nvdimm);
65#if IS_ENABLED(CONFIG_NVDIMM_KEYS) 65#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index cfde992684e7..379bf4305e61 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -250,6 +250,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
250void nvdimm_set_aliasing(struct device *dev); 250void nvdimm_set_aliasing(struct device *dev);
251void nvdimm_set_locked(struct device *dev); 251void nvdimm_set_locked(struct device *dev);
252void nvdimm_clear_locked(struct device *dev); 252void nvdimm_clear_locked(struct device *dev);
253int nvdimm_security_setup_events(struct device *dev);
253#if IS_ENABLED(CONFIG_NVDIMM_KEYS) 254#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
254int nvdimm_security_unlock(struct device *dev); 255int nvdimm_security_unlock(struct device *dev);
255#else 256#else
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 08f2c92602f4..6a9dd68c0f4f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1253,6 +1253,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1253 * effects say only one namespace is affected. 1253 * effects say only one namespace is affected.
1254 */ 1254 */
1255 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1255 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1256 mutex_lock(&ctrl->scan_lock);
1256 nvme_start_freeze(ctrl); 1257 nvme_start_freeze(ctrl);
1257 nvme_wait_freeze(ctrl); 1258 nvme_wait_freeze(ctrl);
1258 } 1259 }
@@ -1281,8 +1282,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1281 */ 1282 */
1282 if (effects & NVME_CMD_EFFECTS_LBCC) 1283 if (effects & NVME_CMD_EFFECTS_LBCC)
1283 nvme_update_formats(ctrl); 1284 nvme_update_formats(ctrl);
1284 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) 1285 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1285 nvme_unfreeze(ctrl); 1286 nvme_unfreeze(ctrl);
1287 mutex_unlock(&ctrl->scan_lock);
1288 }
1286 if (effects & NVME_CMD_EFFECTS_CCC) 1289 if (effects & NVME_CMD_EFFECTS_CCC)
1287 nvme_init_identify(ctrl); 1290 nvme_init_identify(ctrl);
1288 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) 1291 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -2173,18 +2176,20 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
2173 size_t nqnlen; 2176 size_t nqnlen;
2174 int off; 2177 int off;
2175 2178
2176 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2179 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2177 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2180 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2178 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2181 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2179 return; 2182 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2180 } 2183 return;
2184 }
2181 2185
2182 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2186 if (ctrl->vs >= NVME_VS(1, 2, 1))
2183 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2187 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2188 }
2184 2189
2185 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2190 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2186 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2191 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2187 "nqn.2014.08.org.nvmexpress:%4x%4x", 2192 "nqn.2014.08.org.nvmexpress:%04x%04x",
2188 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2193 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2189 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2194 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2190 off += sizeof(id->sn); 2195 off += sizeof(id->sn);
@@ -2500,7 +2505,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2500 ctrl->oaes = le32_to_cpu(id->oaes); 2505 ctrl->oaes = le32_to_cpu(id->oaes);
2501 atomic_set(&ctrl->abort_limit, id->acl + 1); 2506 atomic_set(&ctrl->abort_limit, id->acl + 1);
2502 ctrl->vwc = id->vwc; 2507 ctrl->vwc = id->vwc;
2503 ctrl->cntlid = le16_to_cpup(&id->cntlid);
2504 if (id->mdts) 2508 if (id->mdts)
2505 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2509 max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2506 else 2510 else
@@ -3400,6 +3404,7 @@ static void nvme_scan_work(struct work_struct *work)
3400 if (nvme_identify_ctrl(ctrl, &id)) 3404 if (nvme_identify_ctrl(ctrl, &id))
3401 return; 3405 return;
3402 3406
3407 mutex_lock(&ctrl->scan_lock);
3403 nn = le32_to_cpu(id->nn); 3408 nn = le32_to_cpu(id->nn);
3404 if (ctrl->vs >= NVME_VS(1, 1, 0) && 3409 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
3405 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { 3410 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3408,6 +3413,7 @@ static void nvme_scan_work(struct work_struct *work)
3408 } 3413 }
3409 nvme_scan_ns_sequential(ctrl, nn); 3414 nvme_scan_ns_sequential(ctrl, nn);
3410out_free_id: 3415out_free_id:
3416 mutex_unlock(&ctrl->scan_lock);
3411 kfree(id); 3417 kfree(id);
3412 down_write(&ctrl->namespaces_rwsem); 3418 down_write(&ctrl->namespaces_rwsem);
3413 list_sort(NULL, &ctrl->namespaces, ns_cmp); 3419 list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3651,6 +3657,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
3651 3657
3652 ctrl->state = NVME_CTRL_NEW; 3658 ctrl->state = NVME_CTRL_NEW;
3653 spin_lock_init(&ctrl->lock); 3659 spin_lock_init(&ctrl->lock);
3660 mutex_init(&ctrl->scan_lock);
3654 INIT_LIST_HEAD(&ctrl->namespaces); 3661 INIT_LIST_HEAD(&ctrl->namespaces);
3655 init_rwsem(&ctrl->namespaces_rwsem); 3662 init_rwsem(&ctrl->namespaces_rwsem);
3656 ctrl->dev = dev; 3663 ctrl->dev = dev;
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index b2ab213f43de..3eb908c50e1a 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -874,6 +874,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
874 if (opts->discovery_nqn) { 874 if (opts->discovery_nqn) {
875 opts->kato = 0; 875 opts->kato = 0;
876 opts->nr_io_queues = 0; 876 opts->nr_io_queues = 0;
877 opts->nr_write_queues = 0;
878 opts->nr_poll_queues = 0;
877 opts->duplicate_connect = true; 879 opts->duplicate_connect = true;
878 } 880 }
879 if (ctrl_loss_tmo < 0) 881 if (ctrl_loss_tmo < 0)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 183ec17ba067..b9fff3b8ed1b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -545,8 +545,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
545 timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); 545 timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
546 ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + 546 ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
547 ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc); 547 ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
548 if (!(ctrl->anacap & (1 << 6))) 548 ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
549 ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
550 549
551 if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) { 550 if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
552 dev_err(ctrl->device, 551 dev_err(ctrl->device,
@@ -570,6 +569,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
570 return 0; 569 return 0;
571out_free_ana_log_buf: 570out_free_ana_log_buf:
572 kfree(ctrl->ana_log_buf); 571 kfree(ctrl->ana_log_buf);
572 ctrl->ana_log_buf = NULL;
573out: 573out:
574 return error; 574 return error;
575} 575}
@@ -577,5 +577,6 @@ out:
577void nvme_mpath_uninit(struct nvme_ctrl *ctrl) 577void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
578{ 578{
579 kfree(ctrl->ana_log_buf); 579 kfree(ctrl->ana_log_buf);
580 ctrl->ana_log_buf = NULL;
580} 581}
581 582
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2b36ac922596..c4a1bb41abf0 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -90,6 +90,11 @@ enum nvme_quirks {
90 * Set MEDIUM priority on SQ creation 90 * Set MEDIUM priority on SQ creation
91 */ 91 */
92 NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), 92 NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
93
94 /*
95 * Ignore device provided subnqn.
96 */
97 NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
93}; 98};
94 99
95/* 100/*
@@ -149,6 +154,7 @@ struct nvme_ctrl {
149 enum nvme_ctrl_state state; 154 enum nvme_ctrl_state state;
150 bool identified; 155 bool identified;
151 spinlock_t lock; 156 spinlock_t lock;
157 struct mutex scan_lock;
152 const struct nvme_ctrl_ops *ops; 158 const struct nvme_ctrl_ops *ops;
153 struct request_queue *admin_q; 159 struct request_queue *admin_q;
154 struct request_queue *connect_q; 160 struct request_queue *connect_q;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a0bf6a24d50..7fee665ec45e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -95,6 +95,7 @@ struct nvme_dev;
95struct nvme_queue; 95struct nvme_queue;
96 96
97static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 97static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
98static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
98 99
99/* 100/*
100 * Represents an NVM Express device. Each nvme_dev is a PCI function. 101 * Represents an NVM Express device. Each nvme_dev is a PCI function.
@@ -1019,9 +1020,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
1019 1020
1020static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) 1021static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
1021{ 1022{
1022 if (++nvmeq->cq_head == nvmeq->q_depth) { 1023 if (nvmeq->cq_head == nvmeq->q_depth - 1) {
1023 nvmeq->cq_head = 0; 1024 nvmeq->cq_head = 0;
1024 nvmeq->cq_phase = !nvmeq->cq_phase; 1025 nvmeq->cq_phase = !nvmeq->cq_phase;
1026 } else {
1027 nvmeq->cq_head++;
1025 } 1028 }
1026} 1029}
1027 1030
@@ -1420,6 +1423,14 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1420 return 0; 1423 return 0;
1421} 1424}
1422 1425
1426static void nvme_suspend_io_queues(struct nvme_dev *dev)
1427{
1428 int i;
1429
1430 for (i = dev->ctrl.queue_count - 1; i > 0; i--)
1431 nvme_suspend_queue(&dev->queues[i]);
1432}
1433
1423static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 1434static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
1424{ 1435{
1425 struct nvme_queue *nvmeq = &dev->queues[0]; 1436 struct nvme_queue *nvmeq = &dev->queues[0];
@@ -1485,8 +1496,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
1485 if (dev->ctrl.queue_count > qid) 1496 if (dev->ctrl.queue_count > qid)
1486 return 0; 1497 return 0;
1487 1498
1488 nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), 1499 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
1489 &nvmeq->cq_dma_addr, GFP_KERNEL); 1500 &nvmeq->cq_dma_addr, GFP_KERNEL);
1490 if (!nvmeq->cqes) 1501 if (!nvmeq->cqes)
1491 goto free_nvmeq; 1502 goto free_nvmeq;
1492 1503
@@ -1885,8 +1896,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
1885 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 1896 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
1886 size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; 1897 size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
1887 1898
1888 dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i], 1899 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
1889 le64_to_cpu(desc->addr)); 1900 le64_to_cpu(desc->addr),
1901 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1890 } 1902 }
1891 1903
1892 kfree(dev->host_mem_desc_bufs); 1904 kfree(dev->host_mem_desc_bufs);
@@ -1915,8 +1927,8 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
1915 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1927 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
1916 max_entries = dev->ctrl.hmmaxd; 1928 max_entries = dev->ctrl.hmmaxd;
1917 1929
1918 descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), 1930 descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
1919 &descs_dma, GFP_KERNEL); 1931 &descs_dma, GFP_KERNEL);
1920 if (!descs) 1932 if (!descs)
1921 goto out; 1933 goto out;
1922 1934
@@ -1952,8 +1964,9 @@ out_free_bufs:
1952 while (--i >= 0) { 1964 while (--i >= 0) {
1953 size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; 1965 size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
1954 1966
1955 dma_free_coherent(dev->dev, size, bufs[i], 1967 dma_free_attrs(dev->dev, size, bufs[i],
1956 le64_to_cpu(descs[i].addr)); 1968 le64_to_cpu(descs[i].addr),
1969 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1957 } 1970 }
1958 1971
1959 kfree(bufs); 1972 kfree(bufs);
@@ -2028,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
2028 return ret; 2041 return ret;
2029} 2042}
2030 2043
2044/* irq_queues covers admin queue */
2031static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) 2045static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
2032{ 2046{
2033 unsigned int this_w_queues = write_queues; 2047 unsigned int this_w_queues = write_queues;
2034 2048
2049 WARN_ON(!irq_queues);
2050
2035 /* 2051 /*
2036 * Setup read/write queue split 2052 * Setup read/write queue split, assign admin queue one independent
2053 * irq vector if irq_queues is > 1.
2037 */ 2054 */
2038 if (irq_queues == 1) { 2055 if (irq_queues <= 2) {
2039 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2056 dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2040 dev->io_queues[HCTX_TYPE_READ] = 0; 2057 dev->io_queues[HCTX_TYPE_READ] = 0;
2041 return; 2058 return;
@@ -2043,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
2043 2060
2044 /* 2061 /*
2045 * If 'write_queues' is set, ensure it leaves room for at least 2062 * If 'write_queues' is set, ensure it leaves room for at least
2046 * one read queue 2063 * one read queue and one admin queue
2047 */ 2064 */
2048 if (this_w_queues >= irq_queues) 2065 if (this_w_queues >= irq_queues)
2049 this_w_queues = irq_queues - 1; 2066 this_w_queues = irq_queues - 2;
2050 2067
2051 /* 2068 /*
2052 * If 'write_queues' is set to zero, reads and writes will share 2069 * If 'write_queues' is set to zero, reads and writes will share
2053 * a queue set. 2070 * a queue set.
2054 */ 2071 */
2055 if (!this_w_queues) { 2072 if (!this_w_queues) {
2056 dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues; 2073 dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
2057 dev->io_queues[HCTX_TYPE_READ] = 0; 2074 dev->io_queues[HCTX_TYPE_READ] = 0;
2058 } else { 2075 } else {
2059 dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; 2076 dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
2060 dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues; 2077 dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
2061 } 2078 }
2062} 2079}
2063 2080
@@ -2082,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2082 this_p_queues = nr_io_queues - 1; 2099 this_p_queues = nr_io_queues - 1;
2083 irq_queues = 1; 2100 irq_queues = 1;
2084 } else { 2101 } else {
2085 irq_queues = nr_io_queues - this_p_queues; 2102 irq_queues = nr_io_queues - this_p_queues + 1;
2086 } 2103 }
2087 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; 2104 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
2088 2105
@@ -2102,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2102 * If we got a failure and we're down to asking for just 2119 * If we got a failure and we're down to asking for just
2103 * 1 + 1 queues, just ask for a single vector. We'll share 2120 * 1 + 1 queues, just ask for a single vector. We'll share
2104 * that between the single IO queue and the admin queue. 2121 * that between the single IO queue and the admin queue.
2122 * Otherwise, we assign one independent vector to admin queue.
2105 */ 2123 */
2106 if (result >= 0 && irq_queues > 1) 2124 if (irq_queues > 1)
2107 irq_queues = irq_sets[0] + irq_sets[1] + 1; 2125 irq_queues = irq_sets[0] + irq_sets[1] + 1;
2108 2126
2109 result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, 2127 result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
@@ -2132,6 +2150,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2132 return result; 2150 return result;
2133} 2151}
2134 2152
2153static void nvme_disable_io_queues(struct nvme_dev *dev)
2154{
2155 if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
2156 __nvme_disable_io_queues(dev, nvme_admin_delete_cq);
2157}
2158
2135static int nvme_setup_io_queues(struct nvme_dev *dev) 2159static int nvme_setup_io_queues(struct nvme_dev *dev)
2136{ 2160{
2137 struct nvme_queue *adminq = &dev->queues[0]; 2161 struct nvme_queue *adminq = &dev->queues[0];
@@ -2168,6 +2192,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
2168 } while (1); 2192 } while (1);
2169 adminq->q_db = dev->dbs; 2193 adminq->q_db = dev->dbs;
2170 2194
2195 retry:
2171 /* Deregister the admin queue's interrupt */ 2196 /* Deregister the admin queue's interrupt */
2172 pci_free_irq(pdev, 0, adminq); 2197 pci_free_irq(pdev, 0, adminq);
2173 2198
@@ -2185,25 +2210,34 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
2185 result = max(result - 1, 1); 2210 result = max(result - 1, 1);
2186 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 2211 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
2187 2212
2188 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2189 dev->io_queues[HCTX_TYPE_DEFAULT],
2190 dev->io_queues[HCTX_TYPE_READ],
2191 dev->io_queues[HCTX_TYPE_POLL]);
2192
2193 /* 2213 /*
2194 * Should investigate if there's a performance win from allocating 2214 * Should investigate if there's a performance win from allocating
2195 * more queues than interrupt vectors; it might allow the submission 2215 * more queues than interrupt vectors; it might allow the submission
2196 * path to scale better, even if the receive path is limited by the 2216 * path to scale better, even if the receive path is limited by the
2197 * number of interrupts. 2217 * number of interrupts.
2198 */ 2218 */
2199
2200 result = queue_request_irq(adminq); 2219 result = queue_request_irq(adminq);
2201 if (result) { 2220 if (result) {
2202 adminq->cq_vector = -1; 2221 adminq->cq_vector = -1;
2203 return result; 2222 return result;
2204 } 2223 }
2205 set_bit(NVMEQ_ENABLED, &adminq->flags); 2224 set_bit(NVMEQ_ENABLED, &adminq->flags);
2206 return nvme_create_io_queues(dev); 2225
2226 result = nvme_create_io_queues(dev);
2227 if (result || dev->online_queues < 2)
2228 return result;
2229
2230 if (dev->online_queues - 1 < dev->max_qid) {
2231 nr_io_queues = dev->online_queues - 1;
2232 nvme_disable_io_queues(dev);
2233 nvme_suspend_io_queues(dev);
2234 goto retry;
2235 }
2236 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2237 dev->io_queues[HCTX_TYPE_DEFAULT],
2238 dev->io_queues[HCTX_TYPE_READ],
2239 dev->io_queues[HCTX_TYPE_POLL]);
2240 return 0;
2207} 2241}
2208 2242
2209static void nvme_del_queue_end(struct request *req, blk_status_t error) 2243static void nvme_del_queue_end(struct request *req, blk_status_t error)
@@ -2248,7 +2282,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
2248 return 0; 2282 return 0;
2249} 2283}
2250 2284
2251static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2285static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
2252{ 2286{
2253 int nr_queues = dev->online_queues - 1, sent = 0; 2287 int nr_queues = dev->online_queues - 1, sent = 0;
2254 unsigned long timeout; 2288 unsigned long timeout;
@@ -2294,7 +2328,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
2294 dev->tagset.nr_maps = 2; /* default + read */ 2328 dev->tagset.nr_maps = 2; /* default + read */
2295 if (dev->io_queues[HCTX_TYPE_POLL]) 2329 if (dev->io_queues[HCTX_TYPE_POLL])
2296 dev->tagset.nr_maps++; 2330 dev->tagset.nr_maps++;
2297 dev->tagset.nr_maps = HCTX_MAX_TYPES;
2298 dev->tagset.timeout = NVME_IO_TIMEOUT; 2331 dev->tagset.timeout = NVME_IO_TIMEOUT;
2299 dev->tagset.numa_node = dev_to_node(dev->dev); 2332 dev->tagset.numa_node = dev_to_node(dev->dev);
2300 dev->tagset.queue_depth = 2333 dev->tagset.queue_depth =
@@ -2410,7 +2443,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
2410 2443
2411static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 2444static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
2412{ 2445{
2413 int i;
2414 bool dead = true; 2446 bool dead = true;
2415 struct pci_dev *pdev = to_pci_dev(dev->dev); 2447 struct pci_dev *pdev = to_pci_dev(dev->dev);
2416 2448
@@ -2437,13 +2469,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
2437 nvme_stop_queues(&dev->ctrl); 2469 nvme_stop_queues(&dev->ctrl);
2438 2470
2439 if (!dead && dev->ctrl.queue_count > 0) { 2471 if (!dead && dev->ctrl.queue_count > 0) {
2440 if (nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 2472 nvme_disable_io_queues(dev);
2441 nvme_disable_io_queues(dev, nvme_admin_delete_cq);
2442 nvme_disable_admin_queue(dev, shutdown); 2473 nvme_disable_admin_queue(dev, shutdown);
2443 } 2474 }
2444 for (i = dev->ctrl.queue_count - 1; i >= 0; i--) 2475 nvme_suspend_io_queues(dev);
2445 nvme_suspend_queue(&dev->queues[i]); 2476 nvme_suspend_queue(&dev->queues[0]);
2446
2447 nvme_pci_disable(dev); 2477 nvme_pci_disable(dev);
2448 2478
2449 blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 2479 blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2527,27 +2557,18 @@ static void nvme_reset_work(struct work_struct *work)
2527 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2557 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2528 nvme_dev_disable(dev, false); 2558 nvme_dev_disable(dev, false);
2529 2559
2530 /* 2560 mutex_lock(&dev->shutdown_lock);
2531 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
2532 * initializing procedure here.
2533 */
2534 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2535 dev_warn(dev->ctrl.device,
2536 "failed to mark controller CONNECTING\n");
2537 goto out;
2538 }
2539
2540 result = nvme_pci_enable(dev); 2561 result = nvme_pci_enable(dev);
2541 if (result) 2562 if (result)
2542 goto out; 2563 goto out_unlock;
2543 2564
2544 result = nvme_pci_configure_admin_queue(dev); 2565 result = nvme_pci_configure_admin_queue(dev);
2545 if (result) 2566 if (result)
2546 goto out; 2567 goto out_unlock;
2547 2568
2548 result = nvme_alloc_admin_tags(dev); 2569 result = nvme_alloc_admin_tags(dev);
2549 if (result) 2570 if (result)
2550 goto out; 2571 goto out_unlock;
2551 2572
2552 /* 2573 /*
2553 * Limit the max command size to prevent iod->sg allocations going 2574 * Limit the max command size to prevent iod->sg allocations going
@@ -2555,6 +2576,17 @@ static void nvme_reset_work(struct work_struct *work)
2555 */ 2576 */
2556 dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; 2577 dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
2557 dev->ctrl.max_segments = NVME_MAX_SEGS; 2578 dev->ctrl.max_segments = NVME_MAX_SEGS;
2579 mutex_unlock(&dev->shutdown_lock);
2580
2581 /*
2582 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
2583 * initializing procedure here.
2584 */
2585 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2586 dev_warn(dev->ctrl.device,
2587 "failed to mark controller CONNECTING\n");
2588 goto out;
2589 }
2558 2590
2559 result = nvme_init_identify(&dev->ctrl); 2591 result = nvme_init_identify(&dev->ctrl);
2560 if (result) 2592 if (result)
@@ -2619,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work)
2619 nvme_start_ctrl(&dev->ctrl); 2651 nvme_start_ctrl(&dev->ctrl);
2620 return; 2652 return;
2621 2653
2654 out_unlock:
2655 mutex_unlock(&dev->shutdown_lock);
2622 out: 2656 out:
2623 nvme_remove_dead_ctrl(dev, result); 2657 nvme_remove_dead_ctrl(dev, result);
2624} 2658}
@@ -2946,6 +2980,8 @@ static const struct pci_device_id nvme_id_table[] = {
2946 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 2980 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
2947 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 2981 .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
2948 NVME_QUIRK_MEDIUM_PRIO_SQ }, 2982 NVME_QUIRK_MEDIUM_PRIO_SQ },
2983 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
2984 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
2949 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2985 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
2950 .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 2986 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2951 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 2987 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0a2fd2949ad7..52abc3a6de12 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -119,6 +119,7 @@ struct nvme_rdma_ctrl {
119 119
120 struct nvme_ctrl ctrl; 120 struct nvme_ctrl ctrl;
121 bool use_inline_data; 121 bool use_inline_data;
122 u32 io_queues[HCTX_MAX_TYPES];
122}; 123};
123 124
124static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) 125static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -165,8 +166,8 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
165static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) 166static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
166{ 167{
167 return nvme_rdma_queue_idx(queue) > 168 return nvme_rdma_queue_idx(queue) >
168 queue->ctrl->ctrl.opts->nr_io_queues + 169 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
169 queue->ctrl->ctrl.opts->nr_write_queues; 170 queue->ctrl->io_queues[HCTX_TYPE_READ];
170} 171}
171 172
172static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) 173static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
@@ -661,8 +662,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
661 nr_io_queues = min_t(unsigned int, nr_io_queues, 662 nr_io_queues = min_t(unsigned int, nr_io_queues,
662 ibdev->num_comp_vectors); 663 ibdev->num_comp_vectors);
663 664
664 nr_io_queues += min(opts->nr_write_queues, num_online_cpus()); 665 if (opts->nr_write_queues) {
665 nr_io_queues += min(opts->nr_poll_queues, num_online_cpus()); 666 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
667 min(opts->nr_write_queues, nr_io_queues);
668 nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
669 } else {
670 ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
671 }
672
673 ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
674
675 if (opts->nr_poll_queues) {
676 ctrl->io_queues[HCTX_TYPE_POLL] =
677 min(opts->nr_poll_queues, num_online_cpus());
678 nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
679 }
666 680
667 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 681 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
668 if (ret) 682 if (ret)
@@ -1689,18 +1703,28 @@ static enum blk_eh_timer_return
1689nvme_rdma_timeout(struct request *rq, bool reserved) 1703nvme_rdma_timeout(struct request *rq, bool reserved)
1690{ 1704{
1691 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1705 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1706 struct nvme_rdma_queue *queue = req->queue;
1707 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1692 1708
1693 dev_warn(req->queue->ctrl->ctrl.device, 1709 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
1694 "I/O %d QID %d timeout, reset controller\n", 1710 rq->tag, nvme_rdma_queue_idx(queue));
1695 rq->tag, nvme_rdma_queue_idx(req->queue));
1696 1711
1697 /* queue error recovery */ 1712 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
1698 nvme_rdma_error_recovery(req->queue->ctrl); 1713 /*
1714 * Teardown immediately if controller times out while starting
1715 * or we are already started error recovery. all outstanding
1716 * requests are completed on shutdown, so we return BLK_EH_DONE.
1717 */
1718 flush_work(&ctrl->err_work);
1719 nvme_rdma_teardown_io_queues(ctrl, false);
1720 nvme_rdma_teardown_admin_queue(ctrl, false);
1721 return BLK_EH_DONE;
1722 }
1699 1723
1700 /* fail with DNR on cmd timeout */ 1724 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1701 nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR; 1725 nvme_rdma_error_recovery(ctrl);
1702 1726
1703 return BLK_EH_DONE; 1727 return BLK_EH_RESET_TIMER;
1704} 1728}
1705 1729
1706static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1730static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1779,17 +1803,15 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
1779 struct nvme_rdma_ctrl *ctrl = set->driver_data; 1803 struct nvme_rdma_ctrl *ctrl = set->driver_data;
1780 1804
1781 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; 1805 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
1782 set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues; 1806 set->map[HCTX_TYPE_DEFAULT].nr_queues =
1807 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1808 set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
1783 if (ctrl->ctrl.opts->nr_write_queues) { 1809 if (ctrl->ctrl.opts->nr_write_queues) {
1784 /* separate read/write queues */ 1810 /* separate read/write queues */
1785 set->map[HCTX_TYPE_DEFAULT].nr_queues =
1786 ctrl->ctrl.opts->nr_write_queues;
1787 set->map[HCTX_TYPE_READ].queue_offset = 1811 set->map[HCTX_TYPE_READ].queue_offset =
1788 ctrl->ctrl.opts->nr_write_queues; 1812 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1789 } else { 1813 } else {
1790 /* mixed read/write queues */ 1814 /* mixed read/write queues */
1791 set->map[HCTX_TYPE_DEFAULT].nr_queues =
1792 ctrl->ctrl.opts->nr_io_queues;
1793 set->map[HCTX_TYPE_READ].queue_offset = 0; 1815 set->map[HCTX_TYPE_READ].queue_offset = 0;
1794 } 1816 }
1795 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], 1817 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1799,12 +1821,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
1799 1821
1800 if (ctrl->ctrl.opts->nr_poll_queues) { 1822 if (ctrl->ctrl.opts->nr_poll_queues) {
1801 set->map[HCTX_TYPE_POLL].nr_queues = 1823 set->map[HCTX_TYPE_POLL].nr_queues =
1802 ctrl->ctrl.opts->nr_poll_queues; 1824 ctrl->io_queues[HCTX_TYPE_POLL];
1803 set->map[HCTX_TYPE_POLL].queue_offset = 1825 set->map[HCTX_TYPE_POLL].queue_offset =
1804 ctrl->ctrl.opts->nr_io_queues; 1826 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1805 if (ctrl->ctrl.opts->nr_write_queues) 1827 if (ctrl->ctrl.opts->nr_write_queues)
1806 set->map[HCTX_TYPE_POLL].queue_offset += 1828 set->map[HCTX_TYPE_POLL].queue_offset +=
1807 ctrl->ctrl.opts->nr_write_queues; 1829 ctrl->io_queues[HCTX_TYPE_READ];
1808 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); 1830 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
1809 } 1831 }
1810 return 0; 1832 return 0;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index de174912445e..5f0a00425242 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1565,8 +1565,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1565{ 1565{
1566 nvme_tcp_stop_io_queues(ctrl); 1566 nvme_tcp_stop_io_queues(ctrl);
1567 if (remove) { 1567 if (remove) {
1568 if (ctrl->ops->flags & NVME_F_FABRICS) 1568 blk_cleanup_queue(ctrl->connect_q);
1569 blk_cleanup_queue(ctrl->connect_q);
1570 blk_mq_free_tag_set(ctrl->tagset); 1569 blk_mq_free_tag_set(ctrl->tagset);
1571 } 1570 }
1572 nvme_tcp_free_io_queues(ctrl); 1571 nvme_tcp_free_io_queues(ctrl);
@@ -1587,12 +1586,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1587 goto out_free_io_queues; 1586 goto out_free_io_queues;
1588 } 1587 }
1589 1588
1590 if (ctrl->ops->flags & NVME_F_FABRICS) { 1589 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1591 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); 1590 if (IS_ERR(ctrl->connect_q)) {
1592 if (IS_ERR(ctrl->connect_q)) { 1591 ret = PTR_ERR(ctrl->connect_q);
1593 ret = PTR_ERR(ctrl->connect_q); 1592 goto out_free_tag_set;
1594 goto out_free_tag_set;
1595 }
1596 } 1593 }
1597 } else { 1594 } else {
1598 blk_mq_update_nr_hw_queues(ctrl->tagset, 1595 blk_mq_update_nr_hw_queues(ctrl->tagset,
@@ -1606,7 +1603,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1606 return 0; 1603 return 0;
1607 1604
1608out_cleanup_connect_q: 1605out_cleanup_connect_q:
1609 if (new && (ctrl->ops->flags & NVME_F_FABRICS)) 1606 if (new)
1610 blk_cleanup_queue(ctrl->connect_q); 1607 blk_cleanup_queue(ctrl->connect_q);
1611out_free_tag_set: 1608out_free_tag_set:
1612 if (new) 1609 if (new)
@@ -1620,7 +1617,6 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1620{ 1617{
1621 nvme_tcp_stop_queue(ctrl, 0); 1618 nvme_tcp_stop_queue(ctrl, 0);
1622 if (remove) { 1619 if (remove) {
1623 free_opal_dev(ctrl->opal_dev);
1624 blk_cleanup_queue(ctrl->admin_q); 1620 blk_cleanup_queue(ctrl->admin_q);
1625 blk_mq_free_tag_set(ctrl->admin_tagset); 1621 blk_mq_free_tag_set(ctrl->admin_tagset);
1626 } 1622 }
@@ -1952,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
1952 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; 1948 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
1953 struct nvme_tcp_cmd_pdu *pdu = req->pdu; 1949 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1954 1950
1955 dev_dbg(ctrl->ctrl.device, 1951 dev_warn(ctrl->ctrl.device,
1956 "queue %d: timeout request %#x type %d\n", 1952 "queue %d: timeout request %#x type %d\n",
1957 nvme_tcp_queue_id(req->queue), rq->tag, 1953 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
1958 pdu->hdr.type);
1959 1954
1960 if (ctrl->ctrl.state != NVME_CTRL_LIVE) { 1955 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
1961 union nvme_result res = {}; 1956 /*
1962 1957 * Teardown immediately if controller times out while starting
1963 nvme_req(rq)->flags |= NVME_REQ_CANCELLED; 1958 * or we are already started error recovery. all outstanding
1964 nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res); 1959 * requests are completed on shutdown, so we return BLK_EH_DONE.
1960 */
1961 flush_work(&ctrl->err_work);
1962 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
1963 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
1965 return BLK_EH_DONE; 1964 return BLK_EH_DONE;
1966 } 1965 }
1967 1966
1968 /* queue error recovery */ 1967 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1969 nvme_tcp_error_recovery(&ctrl->ctrl); 1968 nvme_tcp_error_recovery(&ctrl->ctrl);
1970 1969
1971 return BLK_EH_RESET_TIMER; 1970 return BLK_EH_RESET_TIMER;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index a8d23eb80192..a884e3a0e8af 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
139static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); 139static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
140static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); 140static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
141static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); 141static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
142static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
143 struct nvmet_rdma_rsp *r);
144static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
145 struct nvmet_rdma_rsp *r);
142 146
143static const struct nvmet_fabrics_ops nvmet_rdma_ops; 147static const struct nvmet_fabrics_ops nvmet_rdma_ops;
144 148
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
182 spin_unlock_irqrestore(&queue->rsps_lock, flags); 186 spin_unlock_irqrestore(&queue->rsps_lock, flags);
183 187
184 if (unlikely(!rsp)) { 188 if (unlikely(!rsp)) {
185 rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); 189 int ret;
190
191 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
186 if (unlikely(!rsp)) 192 if (unlikely(!rsp))
187 return NULL; 193 return NULL;
194 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
195 if (unlikely(ret)) {
196 kfree(rsp);
197 return NULL;
198 }
199
188 rsp->allocated = true; 200 rsp->allocated = true;
189 } 201 }
190 202
@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
197 unsigned long flags; 209 unsigned long flags;
198 210
199 if (unlikely(rsp->allocated)) { 211 if (unlikely(rsp->allocated)) {
212 nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
200 kfree(rsp); 213 kfree(rsp);
201 return; 214 return;
202 } 215 }
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 44b37b202e39..ad0df786fe93 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1089,7 +1089,7 @@ out:
1089 1089
1090static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) 1090static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1091{ 1091{
1092 int result; 1092 int result = 0;
1093 1093
1094 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) 1094 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1095 return 0; 1095 return 0;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index a09c1c3cf831..49b16f76d78e 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np)
207 207
208 if (!of_node_check_flag(np, OF_OVERLAY)) { 208 if (!of_node_check_flag(np, OF_OVERLAY)) {
209 np->name = __of_get_property(np, "name", NULL); 209 np->name = __of_get_property(np, "name", NULL);
210 np->type = __of_get_property(np, "device_type", NULL);
211 if (!np->name) 210 if (!np->name)
212 np->name = "<NULL>"; 211 np->name = "<NULL>";
213 if (!np->type)
214 np->type = "<NULL>";
215 212
216 phandle = __of_get_property(np, "phandle", &sz); 213 phandle = __of_get_property(np, "phandle", &sz);
217 if (!phandle) 214 if (!phandle)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 7099c652c6a5..9cc1461aac7d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -314,12 +314,8 @@ static bool populate_node(const void *blob,
314 populate_properties(blob, offset, mem, np, pathp, dryrun); 314 populate_properties(blob, offset, mem, np, pathp, dryrun);
315 if (!dryrun) { 315 if (!dryrun) {
316 np->name = of_get_property(np, "name", NULL); 316 np->name = of_get_property(np, "name", NULL);
317 np->type = of_get_property(np, "device_type", NULL);
318
319 if (!np->name) 317 if (!np->name)
320 np->name = "<NULL>"; 318 np->name = "<NULL>";
321 if (!np->type)
322 np->type = "<NULL>";
323 } 319 }
324 320
325 *pnp = np; 321 *pnp = np;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 2b5ac43a5690..c423e94baf0f 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
423 423
424 tchild->parent = target->np; 424 tchild->parent = target->np;
425 tchild->name = __of_get_property(node, "name", NULL); 425 tchild->name = __of_get_property(node, "name", NULL);
426 tchild->type = __of_get_property(node, "device_type", NULL);
427 426
428 if (!tchild->name) 427 if (!tchild->name)
429 tchild->name = "<NULL>"; 428 tchild->name = "<NULL>";
430 if (!tchild->type)
431 tchild->type = "<NULL>";
432 429
433 /* ignore obsolete "linux,phandle" */ 430 /* ignore obsolete "linux,phandle" */
434 phandle = __of_get_property(node, "phandle", &size); 431 phandle = __of_get_property(node, "phandle", &size);
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index d3185063d369..7eda43c66c91 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
155 dp->parent = parent; 155 dp->parent = parent;
156 156
157 dp->name = of_pdt_get_one_property(node, "name"); 157 dp->name = of_pdt_get_one_property(node, "name");
158 dp->type = of_pdt_get_one_property(node, "device_type");
159 dp->phandle = node; 158 dp->phandle = node;
160 159
161 dp->properties = of_pdt_build_prop_list(node); 160 dp->properties = of_pdt_build_prop_list(node);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 08430031bd28..8631efa1daa1 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
806 806
807 if (!of_device_is_available(remote)) { 807 if (!of_device_is_available(remote)) {
808 pr_debug("not available for remote node\n"); 808 pr_debug("not available for remote node\n");
809 of_node_put(remote);
809 return NULL; 810 return NULL;
810 } 811 }
811 812
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e5507add8f04..18f1639dbc4a 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -988,11 +988,9 @@ void _opp_free(struct dev_pm_opp *opp)
988 kfree(opp); 988 kfree(opp);
989} 989}
990 990
991static void _opp_kref_release(struct kref *kref) 991static void _opp_kref_release(struct dev_pm_opp *opp,
992 struct opp_table *opp_table)
992{ 993{
993 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
994 struct opp_table *opp_table = opp->opp_table;
995
996 /* 994 /*
997 * Notify the changes in the availability of the operable 995 * Notify the changes in the availability of the operable
998 * frequency/voltage list. 996 * frequency/voltage list.
@@ -1002,7 +1000,22 @@ static void _opp_kref_release(struct kref *kref)
1002 opp_debug_remove_one(opp); 1000 opp_debug_remove_one(opp);
1003 list_del(&opp->node); 1001 list_del(&opp->node);
1004 kfree(opp); 1002 kfree(opp);
1003}
1005 1004
1005static void _opp_kref_release_unlocked(struct kref *kref)
1006{
1007 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
1008 struct opp_table *opp_table = opp->opp_table;
1009
1010 _opp_kref_release(opp, opp_table);
1011}
1012
1013static void _opp_kref_release_locked(struct kref *kref)
1014{
1015 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
1016 struct opp_table *opp_table = opp->opp_table;
1017
1018 _opp_kref_release(opp, opp_table);
1006 mutex_unlock(&opp_table->lock); 1019 mutex_unlock(&opp_table->lock);
1007} 1020}
1008 1021
@@ -1013,10 +1026,16 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
1013 1026
1014void dev_pm_opp_put(struct dev_pm_opp *opp) 1027void dev_pm_opp_put(struct dev_pm_opp *opp)
1015{ 1028{
1016 kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock); 1029 kref_put_mutex(&opp->kref, _opp_kref_release_locked,
1030 &opp->opp_table->lock);
1017} 1031}
1018EXPORT_SYMBOL_GPL(dev_pm_opp_put); 1032EXPORT_SYMBOL_GPL(dev_pm_opp_put);
1019 1033
1034static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
1035{
1036 kref_put(&opp->kref, _opp_kref_release_unlocked);
1037}
1038
1020/** 1039/**
1021 * dev_pm_opp_remove() - Remove an OPP from OPP table 1040 * dev_pm_opp_remove() - Remove an OPP from OPP table
1022 * @dev: device for which we do this operation 1041 * @dev: device for which we do this operation
@@ -1060,6 +1079,40 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
1060} 1079}
1061EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 1080EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
1062 1081
1082/**
1083 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
1084 * @dev: device for which we do this operation
1085 *
1086 * This function removes all dynamically created OPPs from the opp table.
1087 */
1088void dev_pm_opp_remove_all_dynamic(struct device *dev)
1089{
1090 struct opp_table *opp_table;
1091 struct dev_pm_opp *opp, *temp;
1092 int count = 0;
1093
1094 opp_table = _find_opp_table(dev);
1095 if (IS_ERR(opp_table))
1096 return;
1097
1098 mutex_lock(&opp_table->lock);
1099 list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
1100 if (opp->dynamic) {
1101 dev_pm_opp_put_unlocked(opp);
1102 count++;
1103 }
1104 }
1105 mutex_unlock(&opp_table->lock);
1106
1107 /* Drop the references taken by dev_pm_opp_add() */
1108 while (count--)
1109 dev_pm_opp_put_opp_table(opp_table);
1110
1111 /* Drop the reference taken by _find_opp_table() */
1112 dev_pm_opp_put_opp_table(opp_table);
1113}
1114EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
1115
1063struct dev_pm_opp *_opp_allocate(struct opp_table *table) 1116struct dev_pm_opp *_opp_allocate(struct opp_table *table)
1064{ 1117{
1065 struct dev_pm_opp *opp; 1118 struct dev_pm_opp *opp;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 4310c7a4212e..2ab92409210a 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -21,13 +21,14 @@ menuconfig PCI
21 support for PCI-X and the foundations for PCI Express support. 21 support for PCI-X and the foundations for PCI Express support.
22 Say 'Y' here unless you know what you are doing. 22 Say 'Y' here unless you know what you are doing.
23 23
24if PCI
25
24config PCI_DOMAINS 26config PCI_DOMAINS
25 bool 27 bool
26 depends on PCI 28 depends on PCI
27 29
28config PCI_DOMAINS_GENERIC 30config PCI_DOMAINS_GENERIC
29 bool 31 bool
30 depends on PCI
31 select PCI_DOMAINS 32 select PCI_DOMAINS
32 33
33config PCI_SYSCALL 34config PCI_SYSCALL
@@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig"
37 38
38config PCI_MSI 39config PCI_MSI
39 bool "Message Signaled Interrupts (MSI and MSI-X)" 40 bool "Message Signaled Interrupts (MSI and MSI-X)"
40 depends on PCI
41 select GENERIC_MSI_IRQ 41 select GENERIC_MSI_IRQ
42 help 42 help
43 This allows device drivers to enable MSI (Message Signaled 43 This allows device drivers to enable MSI (Message Signaled
@@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN
59config PCI_QUIRKS 59config PCI_QUIRKS
60 default y 60 default y
61 bool "Enable PCI quirk workarounds" if EXPERT 61 bool "Enable PCI quirk workarounds" if EXPERT
62 depends on PCI
63 help 62 help
64 This enables workarounds for various PCI chipset bugs/quirks. 63 This enables workarounds for various PCI chipset bugs/quirks.
65 Disable this only if your target machine is unaffected by PCI 64 Disable this only if your target machine is unaffected by PCI
@@ -67,7 +66,7 @@ config PCI_QUIRKS
67 66
68config PCI_DEBUG 67config PCI_DEBUG
69 bool "PCI Debugging" 68 bool "PCI Debugging"
70 depends on PCI && DEBUG_KERNEL 69 depends on DEBUG_KERNEL
71 help 70 help
72 Say Y here if you want the PCI core to produce a bunch of debug 71 Say Y here if you want the PCI core to produce a bunch of debug
73 messages to the system log. Select this if you are having a 72 messages to the system log. Select this if you are having a
@@ -77,7 +76,6 @@ config PCI_DEBUG
77 76
78config PCI_REALLOC_ENABLE_AUTO 77config PCI_REALLOC_ENABLE_AUTO
79 bool "Enable PCI resource re-allocation detection" 78 bool "Enable PCI resource re-allocation detection"
80 depends on PCI
81 depends on PCI_IOV 79 depends on PCI_IOV
82 help 80 help
83 Say Y here if you want the PCI core to detect if PCI resource 81 Say Y here if you want the PCI core to detect if PCI resource
@@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO
90 88
91config PCI_STUB 89config PCI_STUB
92 tristate "PCI Stub driver" 90 tristate "PCI Stub driver"
93 depends on PCI
94 help 91 help
95 Say Y or M here if you want be able to reserve a PCI device 92 Say Y or M here if you want be able to reserve a PCI device
96 when it is going to be assigned to a guest operating system. 93 when it is going to be assigned to a guest operating system.
@@ -99,7 +96,6 @@ config PCI_STUB
99 96
100config PCI_PF_STUB 97config PCI_PF_STUB
101 tristate "PCI PF Stub driver" 98 tristate "PCI PF Stub driver"
102 depends on PCI
103 depends on PCI_IOV 99 depends on PCI_IOV
104 help 100 help
105 Say Y or M here if you want to enable support for devices that 101 Say Y or M here if you want to enable support for devices that
@@ -111,7 +107,7 @@ config PCI_PF_STUB
111 107
112config XEN_PCIDEV_FRONTEND 108config XEN_PCIDEV_FRONTEND
113 tristate "Xen PCI Frontend" 109 tristate "Xen PCI Frontend"
114 depends on PCI && X86 && XEN 110 depends on X86 && XEN
115 select PCI_XEN 111 select PCI_XEN
116 select XEN_XENBUS_FRONTEND 112 select XEN_XENBUS_FRONTEND
117 default y 113 default y
@@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL
133 129
134config PCI_IOV 130config PCI_IOV
135 bool "PCI IOV support" 131 bool "PCI IOV support"
136 depends on PCI
137 select PCI_ATS 132 select PCI_ATS
138 help 133 help
139 I/O Virtualization is a PCI feature supported by some devices 134 I/O Virtualization is a PCI feature supported by some devices
@@ -144,7 +139,6 @@ config PCI_IOV
144 139
145config PCI_PRI 140config PCI_PRI
146 bool "PCI PRI support" 141 bool "PCI PRI support"
147 depends on PCI
148 select PCI_ATS 142 select PCI_ATS
149 help 143 help
150 PRI is the PCI Page Request Interface. It allows PCI devices that are 144 PRI is the PCI Page Request Interface. It allows PCI devices that are
@@ -154,7 +148,6 @@ config PCI_PRI
154 148
155config PCI_PASID 149config PCI_PASID
156 bool "PCI PASID support" 150 bool "PCI PASID support"
157 depends on PCI
158 select PCI_ATS 151 select PCI_ATS
159 help 152 help
160 Process Address Space Identifiers (PASIDs) can be used by PCI devices 153 Process Address Space Identifiers (PASIDs) can be used by PCI devices
@@ -167,7 +160,7 @@ config PCI_PASID
167 160
168config PCI_P2PDMA 161config PCI_P2PDMA
169 bool "PCI peer-to-peer transfer support" 162 bool "PCI peer-to-peer transfer support"
170 depends on PCI && ZONE_DEVICE 163 depends on ZONE_DEVICE
171 select GENERIC_ALLOCATOR 164 select GENERIC_ALLOCATOR
172 help 165 help
173 Enableѕ drivers to do PCI peer-to-peer transactions to and from 166 Enableѕ drivers to do PCI peer-to-peer transactions to and from
@@ -184,12 +177,11 @@ config PCI_P2PDMA
184 177
185config PCI_LABEL 178config PCI_LABEL
186 def_bool y if (DMI || ACPI) 179 def_bool y if (DMI || ACPI)
187 depends on PCI
188 select NLS 180 select NLS
189 181
190config PCI_HYPERV 182config PCI_HYPERV
191 tristate "Hyper-V PCI Frontend" 183 tristate "Hyper-V PCI Frontend"
192 depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 184 depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
193 help 185 help
194 The PCI device frontend driver allows the kernel to import arbitrary 186 The PCI device frontend driver allows the kernel to import arbitrary
195 PCI devices from a PCI backend to support PCI driver domains. 187 PCI devices from a PCI backend to support PCI driver domains.
@@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig"
198source "drivers/pci/controller/Kconfig" 190source "drivers/pci/controller/Kconfig"
199source "drivers/pci/endpoint/Kconfig" 191source "drivers/pci/endpoint/Kconfig"
200source "drivers/pci/switch/Kconfig" 192source "drivers/pci/switch/Kconfig"
193
194endif
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 52e47dac028f..80f843030e36 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -310,6 +310,9 @@ static int imx6_pcie_attach_pd(struct device *dev)
310 imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); 310 imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
311 if (IS_ERR(imx6_pcie->pd_pcie)) 311 if (IS_ERR(imx6_pcie->pd_pcie))
312 return PTR_ERR(imx6_pcie->pd_pcie); 312 return PTR_ERR(imx6_pcie->pd_pcie);
313 /* Do nothing when power domain missing */
314 if (!imx6_pcie->pd_pcie)
315 return 0;
313 link = device_link_add(dev, imx6_pcie->pd_pcie, 316 link = device_link_add(dev, imx6_pcie->pd_pcie,
314 DL_FLAG_STATELESS | 317 DL_FLAG_STATELESS |
315 DL_FLAG_PM_RUNTIME | 318 DL_FLAG_PM_RUNTIME |
@@ -323,13 +326,13 @@ static int imx6_pcie_attach_pd(struct device *dev)
323 if (IS_ERR(imx6_pcie->pd_pcie_phy)) 326 if (IS_ERR(imx6_pcie->pd_pcie_phy))
324 return PTR_ERR(imx6_pcie->pd_pcie_phy); 327 return PTR_ERR(imx6_pcie->pd_pcie_phy);
325 328
326 device_link_add(dev, imx6_pcie->pd_pcie_phy, 329 link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
327 DL_FLAG_STATELESS | 330 DL_FLAG_STATELESS |
328 DL_FLAG_PM_RUNTIME | 331 DL_FLAG_PM_RUNTIME |
329 DL_FLAG_RPM_ACTIVE); 332 DL_FLAG_RPM_ACTIVE);
330 if (IS_ERR(link)) { 333 if (!link) {
331 dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link)); 334 dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
332 return PTR_ERR(link); 335 return -EINVAL;
333 } 336 }
334 337
335 return 0; 338 return 0;
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 241ebe0c4505..e35e9eaa50ee 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/gpio/consumer.h>
11#include <linux/of_device.h> 12#include <linux/of_device.h>
12#include <linux/of_gpio.h> 13#include <linux/of_gpio.h>
13#include <linux/pci.h> 14#include <linux/pci.h>
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index b171b6bc15c8..0c389a30ef5d 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -22,7 +22,6 @@
22#include <linux/resource.h> 22#include <linux/resource.h>
23#include <linux/of_pci.h> 23#include <linux/of_pci.h>
24#include <linux/of_irq.h> 24#include <linux/of_irq.h>
25#include <linux/gpio/consumer.h>
26 25
27#include "pcie-designware.h" 26#include "pcie-designware.h"
28 27
@@ -30,7 +29,6 @@ struct armada8k_pcie {
30 struct dw_pcie *pci; 29 struct dw_pcie *pci;
31 struct clk *clk; 30 struct clk *clk;
32 struct clk *clk_reg; 31 struct clk *clk_reg;
33 struct gpio_desc *reset_gpio;
34}; 32};
35 33
36#define PCIE_VENDOR_REGS_OFFSET 0x8000 34#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp)
139 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 137 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
140 struct armada8k_pcie *pcie = to_armada8k_pcie(pci); 138 struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
141 139
142 if (pcie->reset_gpio) {
143 /* assert and then deassert the reset signal */
144 gpiod_set_value_cansleep(pcie->reset_gpio, 1);
145 msleep(100);
146 gpiod_set_value_cansleep(pcie->reset_gpio, 0);
147 }
148 dw_pcie_setup_rc(pp); 140 dw_pcie_setup_rc(pp);
149 armada8k_pcie_establish_link(pcie); 141 armada8k_pcie_establish_link(pcie);
150 142
@@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
257 goto fail_clkreg; 249 goto fail_clkreg;
258 } 250 }
259 251
260 /* Get reset gpio signal and hold asserted (logically high) */
261 pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
262 GPIOD_OUT_HIGH);
263 if (IS_ERR(pcie->reset_gpio)) {
264 ret = PTR_ERR(pcie->reset_gpio);
265 goto fail_clkreg;
266 }
267
268 platform_set_drvdata(pdev, pcie); 252 platform_set_drvdata(pdev, pcie);
269 253
270 ret = armada8k_add_pcie_port(pcie, pdev); 254 ret = armada8k_add_pcie_port(pcie, pdev);
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 9deb56989d72..cb3401a931f8 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -602,9 +602,9 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
602 } 602 }
603 603
604 /* Reserve memory for event queue and make sure memories are zeroed */ 604 /* Reserve memory for event queue and make sure memories are zeroed */
605 msi->eq_cpu = dma_zalloc_coherent(pcie->dev, 605 msi->eq_cpu = dma_alloc_coherent(pcie->dev,
606 msi->nr_eq_region * EQ_MEM_REGION_SIZE, 606 msi->nr_eq_region * EQ_MEM_REGION_SIZE,
607 &msi->eq_dma, GFP_KERNEL); 607 &msi->eq_dma, GFP_KERNEL);
608 if (!msi->eq_cpu) { 608 if (!msi->eq_cpu) {
609 ret = -ENOMEM; 609 ret = -ENOMEM;
610 goto free_irqs; 610 goto free_irqs;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a1c8a09efa5..4c0b47867258 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1168,7 +1168,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1168 const struct irq_affinity *affd) 1168 const struct irq_affinity *affd)
1169{ 1169{
1170 static const struct irq_affinity msi_default_affd; 1170 static const struct irq_affinity msi_default_affd;
1171 int vecs = -ENOSPC; 1171 int msix_vecs = -ENOSPC;
1172 int msi_vecs = -ENOSPC;
1172 1173
1173 if (flags & PCI_IRQ_AFFINITY) { 1174 if (flags & PCI_IRQ_AFFINITY) {
1174 if (!affd) 1175 if (!affd)
@@ -1179,16 +1180,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1179 } 1180 }
1180 1181
1181 if (flags & PCI_IRQ_MSIX) { 1182 if (flags & PCI_IRQ_MSIX) {
1182 vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, 1183 msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
1183 affd); 1184 max_vecs, affd);
1184 if (vecs > 0) 1185 if (msix_vecs > 0)
1185 return vecs; 1186 return msix_vecs;
1186 } 1187 }
1187 1188
1188 if (flags & PCI_IRQ_MSI) { 1189 if (flags & PCI_IRQ_MSI) {
1189 vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); 1190 msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs,
1190 if (vecs > 0) 1191 affd);
1191 return vecs; 1192 if (msi_vecs > 0)
1193 return msi_vecs;
1192 } 1194 }
1193 1195
1194 /* use legacy irq if allowed */ 1196 /* use legacy irq if allowed */
@@ -1199,7 +1201,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1199 } 1201 }
1200 } 1202 }
1201 1203
1202 return vecs; 1204 if (msix_vecs == -ENOSPC)
1205 return -ENOSPC;
1206 return msi_vecs;
1203} 1207}
1204EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); 1208EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
1205 1209
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c9d8e3c837de..c25acace7d91 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str)
6195 } else if (!strncmp(str, "pcie_scan_all", 13)) { 6195 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6196 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); 6196 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6197 } else if (!strncmp(str, "disable_acs_redir=", 18)) { 6197 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6198 disable_acs_redir_param = str + 18; 6198 disable_acs_redir_param =
6199 kstrdup(str + 18, GFP_KERNEL);
6199 } else { 6200 } else {
6200 printk(KERN_ERR "PCI: Unknown option `%s'\n", 6201 printk(KERN_ERR "PCI: Unknown option `%s'\n",
6201 str); 6202 str);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b0a413f3f7ca..e2a879e93d86 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -639,8 +639,9 @@ static void quirk_synopsys_haps(struct pci_dev *pdev)
639 break; 639 break;
640 } 640 }
641} 641}
642DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, 642DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
643 quirk_synopsys_haps); 643 PCI_CLASS_SERIAL_USB_XHCI, 0,
644 quirk_synopsys_haps);
644 645
645/* 646/*
646 * Let's make the southbridge information explicit instead of having to 647 * Let's make the southbridge information explicit instead of having to
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 6c5536d3d42a..e22766c79fe9 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1373,10 +1373,10 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
1373 if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0) 1373 if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
1374 return 0; 1374 return 0;
1375 1375
1376 stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev, 1376 stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
1377 sizeof(*stdev->dma_mrpc), 1377 sizeof(*stdev->dma_mrpc),
1378 &stdev->dma_mrpc_dma_addr, 1378 &stdev->dma_mrpc_dma_addr,
1379 GFP_KERNEL); 1379 GFP_KERNEL);
1380 if (stdev->dma_mrpc == NULL) 1380 if (stdev->dma_mrpc == NULL)
1381 return -ENOMEM; 1381 return -ENOMEM;
1382 1382
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index a91fc67fc4e0..d70ba9bc42d9 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -32,7 +32,7 @@
32 32
33/* register 0x01 */ 33/* register 0x01 */
34#define REF_FREF_SEL_25 BIT(0) 34#define REF_FREF_SEL_25 BIT(0)
35#define PHY_MODE_SATA (0x0 << 5) 35#define PHY_BERLIN_MODE_SATA (0x0 << 5)
36 36
37/* register 0x02 */ 37/* register 0x02 */
38#define USE_MAX_PLL_RATE BIT(12) 38#define USE_MAX_PLL_RATE BIT(12)
@@ -102,7 +102,8 @@ static int phy_berlin_sata_power_on(struct phy *phy)
102 102
103 /* set PHY mode and ref freq to 25 MHz */ 103 /* set PHY mode and ref freq to 25 MHz */
104 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01, 104 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01,
105 0x00ff, REF_FREF_SEL_25 | PHY_MODE_SATA); 105 0x00ff,
106 REF_FREF_SEL_25 | PHY_BERLIN_MODE_SATA);
106 107
107 /* set PHY up to 6 Gbps */ 108 /* set PHY up to 6 Gbps */
108 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25, 109 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25,
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c
index 6fd6e07ab345..09a77e556ece 100644
--- a/drivers/phy/qualcomm/phy-ath79-usb.c
+++ b/drivers/phy/qualcomm/phy-ath79-usb.c
@@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy)
31 31
32 err = reset_control_deassert(priv->reset); 32 err = reset_control_deassert(priv->reset);
33 if (err && priv->no_suspend_override) 33 if (err && priv->no_suspend_override)
34 reset_control_assert(priv->no_suspend_override); 34 reset_control_deassert(priv->no_suspend_override);
35 35
36 return err; 36 return err;
37} 37}
@@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev)
69 if (!priv) 69 if (!priv)
70 return -ENOMEM; 70 return -ENOMEM;
71 71
72 priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy"); 72 priv->reset = devm_reset_control_get(&pdev->dev, "phy");
73 if (IS_ERR(priv->reset)) 73 if (IS_ERR(priv->reset))
74 return PTR_ERR(priv->reset); 74 return PTR_ERR(priv->reset);
75 75
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index f137e0107764..c4709ed7fb0e 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -82,6 +82,7 @@ config PHY_TI_GMII_SEL
82 default y if TI_CPSW=y 82 default y if TI_CPSW=y
83 depends on TI_CPSW || COMPILE_TEST 83 depends on TI_CPSW || COMPILE_TEST
84 select GENERIC_PHY 84 select GENERIC_PHY
85 select REGMAP
85 default m 86 default m
86 help 87 help
87 This driver supports configuring of the TI CPSW Port mode depending on 88 This driver supports configuring of the TI CPSW Port mode depending on
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 77fdaa551977..a52c5bb35033 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -204,11 +204,11 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev,
204 204
205 if (args->args_count < 1) 205 if (args->args_count < 1)
206 return ERR_PTR(-EINVAL); 206 return ERR_PTR(-EINVAL);
207 if (!priv || !priv->if_phys)
208 return ERR_PTR(-ENODEV);
207 if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) && 209 if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) &&
208 args->args_count < 2) 210 args->args_count < 2)
209 return ERR_PTR(-EINVAL); 211 return ERR_PTR(-EINVAL);
210 if (!priv || !priv->if_phys)
211 return ERR_PTR(-ENODEV);
212 if (phy_id > priv->soc_data->num_ports) 212 if (phy_id > priv->soc_data->num_ports)
213 return ERR_PTR(-EINVAL); 213 return ERR_PTR(-EINVAL);
214 if (phy_id != priv->if_phys[phy_id - 1].id) 214 if (phy_id != priv->if_phys[phy_id - 1].id)
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 05044e323ea5..03ec7a5d9d0b 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1513 .matches = { 1513 .matches = {
1514 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1514 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1515 DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), 1515 DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
1516 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1516 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1517 }, 1517 },
1518 }, 1518 },
1519 { 1519 {
@@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1521 .matches = { 1521 .matches = {
1522 DMI_MATCH(DMI_SYS_VENDOR, "HP"), 1522 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1523 DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), 1523 DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
1524 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1524 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1525 }, 1525 },
1526 }, 1526 },
1527 { 1527 {
@@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1529 .matches = { 1529 .matches = {
1530 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1530 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1531 DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), 1531 DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
1532 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1532 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1533 }, 1533 },
1534 }, 1534 },
1535 { 1535 {
@@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1537 .matches = { 1537 .matches = {
1538 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1538 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1539 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), 1539 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
1540 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1540 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1541 }, 1541 },
1542 }, 1542 },
1543 {} 1543 {}
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 1817786ab6aa..a005cbccb4f7 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -45,12 +45,14 @@ config PINCTRL_MT2701
45config PINCTRL_MT7623 45config PINCTRL_MT7623
46 bool "Mediatek MT7623 pin control with generic binding" 46 bool "Mediatek MT7623 pin control with generic binding"
47 depends on MACH_MT7623 || COMPILE_TEST 47 depends on MACH_MT7623 || COMPILE_TEST
48 depends on OF
48 default MACH_MT7623 49 default MACH_MT7623
49 select PINCTRL_MTK_MOORE 50 select PINCTRL_MTK_MOORE
50 51
51config PINCTRL_MT7629 52config PINCTRL_MT7629
52 bool "Mediatek MT7629 pin control" 53 bool "Mediatek MT7629 pin control"
53 depends on MACH_MT7629 || COMPILE_TEST 54 depends on MACH_MT7629 || COMPILE_TEST
55 depends on OF
54 default MACH_MT7629 56 default MACH_MT7629
55 select PINCTRL_MTK_MOORE 57 select PINCTRL_MTK_MOORE
56 58
@@ -92,6 +94,7 @@ config PINCTRL_MT6797
92 94
93config PINCTRL_MT7622 95config PINCTRL_MT7622
94 bool "MediaTek MT7622 pin control" 96 bool "MediaTek MT7622 pin control"
97 depends on OF
95 depends on ARM64 || COMPILE_TEST 98 depends on ARM64 || COMPILE_TEST
96 default ARM64 && ARCH_MEDIATEK 99 default ARM64 && ARCH_MEDIATEK
97 select PINCTRL_MTK_MOORE 100 select PINCTRL_MTK_MOORE
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index c69ca95b1ad5..0f140a802137 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = {
693 693
694static const char * const sdxc_a_groups[] = { 694static const char * const sdxc_a_groups[] = {
695 "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", 695 "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
696 "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a" 696 "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
697}; 697};
698 698
699static const char * const pcm_a_groups[] = { 699static const char * const pcm_a_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index b03481ef99a1..98905d4a79ca 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
832 break; 832 break;
833 833
834 case MCP_TYPE_S18: 834 case MCP_TYPE_S18:
835 one_regmap_config =
836 devm_kmemdup(dev, &mcp23x17_regmap,
837 sizeof(struct regmap_config), GFP_KERNEL);
838 if (!one_regmap_config)
839 return -ENOMEM;
835 mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, 840 mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
836 &mcp23x17_regmap); 841 one_regmap_config);
837 mcp->reg_shift = 1; 842 mcp->reg_shift = 1;
838 mcp->chip.ngpio = 16; 843 mcp->chip.ngpio = 16;
839 mcp->chip.label = "mcp23s18"; 844 mcp->chip.label = "mcp23s18";
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index 7aae52a09ff0..4ffd56ff809e 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -79,7 +79,7 @@ enum {
79 .intr_cfg_reg = 0, \ 79 .intr_cfg_reg = 0, \
80 .intr_status_reg = 0, \ 80 .intr_status_reg = 0, \
81 .intr_target_reg = 0, \ 81 .intr_target_reg = 0, \
82 .tile = NORTH, \ 82 .tile = SOUTH, \
83 .mux_bit = -1, \ 83 .mux_bit = -1, \
84 .pull_bit = pull, \ 84 .pull_bit = pull, \
85 .drv_bit = drv, \ 85 .drv_bit = drv, \
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
index aa8b58125568..ef4268cc6227 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
588static const struct sunxi_pinctrl_desc h6_pinctrl_data = { 588static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
589 .pins = h6_pins, 589 .pins = h6_pins,
590 .npins = ARRAY_SIZE(h6_pins), 590 .npins = ARRAY_SIZE(h6_pins),
591 .irq_banks = 3, 591 .irq_banks = 4,
592 .irq_bank_map = h6_irq_bank_map, 592 .irq_bank_map = h6_irq_bank_map,
593 .irq_read_needs_mux = true, 593 .irq_read_needs_mux = true,
594}; 594};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 5d9184d18c16..0e7fa69e93df 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
698{ 698{
699 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 699 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
700 unsigned short bank = offset / PINS_PER_BANK; 700 unsigned short bank = offset / PINS_PER_BANK;
701 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; 701 unsigned short bank_offset = bank - pctl->desc->pin_base /
702 struct regulator *reg; 702 PINS_PER_BANK;
703 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
704 struct regulator *reg = s_reg->regulator;
705 char supply[16];
703 int ret; 706 int ret;
704 707
705 reg = s_reg->regulator; 708 if (reg) {
706 if (!reg) {
707 char supply[16];
708
709 snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
710 reg = regulator_get(pctl->dev, supply);
711 if (IS_ERR(reg)) {
712 dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
713 'A' + bank);
714 return PTR_ERR(reg);
715 }
716
717 s_reg->regulator = reg;
718 refcount_set(&s_reg->refcount, 1);
719 } else {
720 refcount_inc(&s_reg->refcount); 709 refcount_inc(&s_reg->refcount);
710 return 0;
711 }
712
713 snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
714 reg = regulator_get(pctl->dev, supply);
715 if (IS_ERR(reg)) {
716 dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
717 'A' + bank);
718 return PTR_ERR(reg);
721 } 719 }
722 720
723 ret = regulator_enable(reg); 721 ret = regulator_enable(reg);
@@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
727 goto out; 725 goto out;
728 } 726 }
729 727
728 s_reg->regulator = reg;
729 refcount_set(&s_reg->refcount, 1);
730
730 return 0; 731 return 0;
731 732
732out: 733out:
733 if (refcount_dec_and_test(&s_reg->refcount)) { 734 regulator_put(s_reg->regulator);
734 regulator_put(s_reg->regulator);
735 s_reg->regulator = NULL;
736 }
737 735
738 return ret; 736 return ret;
739} 737}
@@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset)
742{ 740{
743 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 741 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
744 unsigned short bank = offset / PINS_PER_BANK; 742 unsigned short bank = offset / PINS_PER_BANK;
745 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; 743 unsigned short bank_offset = bank - pctl->desc->pin_base /
744 PINS_PER_BANK;
745 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
746 746
747 if (!refcount_dec_and_test(&s_reg->refcount)) 747 if (!refcount_dec_and_test(&s_reg->refcount))
748 return 0; 748 return 0;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e340d2a24b44..034c0317c8d6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -136,7 +136,7 @@ struct sunxi_pinctrl {
136 struct gpio_chip *chip; 136 struct gpio_chip *chip;
137 const struct sunxi_pinctrl_desc *desc; 137 const struct sunxi_pinctrl_desc *desc;
138 struct device *dev; 138 struct device *dev;
139 struct sunxi_pinctrl_regulator regulators[12]; 139 struct sunxi_pinctrl_regulator regulators[9];
140 struct irq_domain *domain; 140 struct irq_domain *domain;
141 struct sunxi_pinctrl_function *functions; 141 struct sunxi_pinctrl_function *functions;
142 unsigned nfunctions; 142 unsigned nfunctions;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e3b62c2ee8d1..b5e9db85e881 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -905,6 +905,7 @@ config TOSHIBA_WMI
905config ACPI_CMPC 905config ACPI_CMPC
906 tristate "CMPC Laptop Extras" 906 tristate "CMPC Laptop Extras"
907 depends on ACPI && INPUT 907 depends on ACPI && INPUT
908 depends on BACKLIGHT_LCD_SUPPORT
908 depends on RFKILL || RFKILL=n 909 depends on RFKILL || RFKILL=n
909 select BACKLIGHT_CLASS_DEVICE 910 select BACKLIGHT_CLASS_DEVICE
910 help 911 help
@@ -1009,7 +1010,7 @@ config INTEL_MFLD_THERMAL
1009 1010
1010config INTEL_IPS 1011config INTEL_IPS
1011 tristate "Intel Intelligent Power Sharing" 1012 tristate "Intel Intelligent Power Sharing"
1012 depends on ACPI 1013 depends on ACPI && PCI
1013 ---help--- 1014 ---help---
1014 Intel Calpella platforms support dynamic power sharing between the 1015 Intel Calpella platforms support dynamic power sharing between the
1015 CPU and GPU, maximizing performance in a given TDP. This driver, 1016 CPU and GPU, maximizing performance in a given TDP. This driver,
@@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL
1128config SAMSUNG_Q10 1129config SAMSUNG_Q10
1129 tristate "Samsung Q10 Extras" 1130 tristate "Samsung Q10 Extras"
1130 depends on ACPI 1131 depends on ACPI
1132 depends on BACKLIGHT_LCD_SUPPORT
1131 select BACKLIGHT_CLASS_DEVICE 1133 select BACKLIGHT_CLASS_DEVICE
1132 ---help--- 1134 ---help---
1133 This driver provides support for backlight control on Samsung Q10 1135 This driver provides support for backlight control on Samsung Q10
@@ -1135,7 +1137,7 @@ config SAMSUNG_Q10
1135 1137
1136config APPLE_GMUX 1138config APPLE_GMUX
1137 tristate "Apple Gmux Driver" 1139 tristate "Apple Gmux Driver"
1138 depends on ACPI 1140 depends on ACPI && PCI
1139 depends on PNP 1141 depends on PNP
1140 depends on BACKLIGHT_CLASS_DEVICE 1142 depends on BACKLIGHT_CLASS_DEVICE
1141 depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE 1143 depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
@@ -1174,7 +1176,7 @@ config INTEL_SMARTCONNECT
1174 1176
1175config INTEL_PMC_IPC 1177config INTEL_PMC_IPC
1176 tristate "Intel PMC IPC Driver" 1178 tristate "Intel PMC IPC Driver"
1177 depends on ACPI 1179 depends on ACPI && PCI
1178 ---help--- 1180 ---help---
1179 This driver provides support for PMC control on some Intel platforms. 1181 This driver provides support for PMC control on some Intel platforms.
1180 The PMC is an ARC processor which defines IPC commands for communication 1182 The PMC is an ARC processor which defines IPC commands for communication
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 797fab33bb98..7cbea796652a 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -224,7 +224,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
224 extoff = NULL; 224 extoff = NULL;
225 break; 225 break;
226 } 226 }
227 if (extoff->n_samples > PTP_MAX_SAMPLES) { 227 if (extoff->n_samples > PTP_MAX_SAMPLES
228 || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
228 err = -EINVAL; 229 err = -EINVAL;
229 break; 230 break;
230 } 231 }
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index bb655854713d..b64c56c33c3b 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -1382,9 +1382,9 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
1382 INIT_WORK(&priv->idb_work, tsi721_db_dpc); 1382 INIT_WORK(&priv->idb_work, tsi721_db_dpc);
1383 1383
1384 /* Allocate buffer for inbound doorbells queue */ 1384 /* Allocate buffer for inbound doorbells queue */
1385 priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, 1385 priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
1386 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, 1386 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
1387 &priv->idb_dma, GFP_KERNEL); 1387 &priv->idb_dma, GFP_KERNEL);
1388 if (!priv->idb_base) 1388 if (!priv->idb_base)
1389 return -ENOMEM; 1389 return -ENOMEM;
1390 1390
@@ -1447,9 +1447,9 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
1447 regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); 1447 regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
1448 1448
1449 /* Allocate space for DMA descriptors */ 1449 /* Allocate space for DMA descriptors */
1450 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, 1450 bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
1451 bd_num * sizeof(struct tsi721_dma_desc), 1451 bd_num * sizeof(struct tsi721_dma_desc),
1452 &bd_phys, GFP_KERNEL); 1452 &bd_phys, GFP_KERNEL);
1453 if (!bd_ptr) 1453 if (!bd_ptr)
1454 return -ENOMEM; 1454 return -ENOMEM;
1455 1455
@@ -1464,7 +1464,7 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
1464 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 1464 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
1465 bd_num : TSI721_DMA_MINSTSSZ; 1465 bd_num : TSI721_DMA_MINSTSSZ;
1466 sts_size = roundup_pow_of_two(sts_size); 1466 sts_size = roundup_pow_of_two(sts_size);
1467 sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, 1467 sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
1468 sts_size * sizeof(struct tsi721_dma_sts), 1468 sts_size * sizeof(struct tsi721_dma_sts),
1469 &sts_phys, GFP_KERNEL); 1469 &sts_phys, GFP_KERNEL);
1470 if (!sts_ptr) { 1470 if (!sts_ptr) {
@@ -1939,10 +1939,10 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1939 1939
1940 /* Outbound message descriptor status FIFO allocation */ 1940 /* Outbound message descriptor status FIFO allocation */
1941 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); 1941 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1942 priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, 1942 priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
1943 priv->omsg_ring[mbox].sts_size * 1943 priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
1944 sizeof(struct tsi721_dma_sts), 1944 &priv->omsg_ring[mbox].sts_phys,
1945 &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); 1945 GFP_KERNEL);
1946 if (priv->omsg_ring[mbox].sts_base == NULL) { 1946 if (priv->omsg_ring[mbox].sts_base == NULL) {
1947 tsi_debug(OMSG, &priv->pdev->dev, 1947 tsi_debug(OMSG, &priv->pdev->dev,
1948 "ENOMEM for OB_MSG_%d status FIFO", mbox); 1948 "ENOMEM for OB_MSG_%d status FIFO", mbox);
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 006ea5a45020..7f5d4436f594 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -90,9 +90,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
90 * Allocate space for DMA descriptors 90 * Allocate space for DMA descriptors
91 * (add an extra element for link descriptor) 91 * (add an extra element for link descriptor)
92 */ 92 */
93 bd_ptr = dma_zalloc_coherent(dev, 93 bd_ptr = dma_alloc_coherent(dev,
94 (bd_num + 1) * sizeof(struct tsi721_dma_desc), 94 (bd_num + 1) * sizeof(struct tsi721_dma_desc),
95 &bd_phys, GFP_ATOMIC); 95 &bd_phys, GFP_ATOMIC);
96 if (!bd_ptr) 96 if (!bd_ptr)
97 return -ENOMEM; 97 return -ENOMEM;
98 98
@@ -108,7 +108,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
108 sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? 108 sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
109 (bd_num + 1) : TSI721_DMA_MINSTSSZ; 109 (bd_num + 1) : TSI721_DMA_MINSTSSZ;
110 sts_size = roundup_pow_of_two(sts_size); 110 sts_size = roundup_pow_of_two(sts_size);
111 sts_ptr = dma_zalloc_coherent(dev, 111 sts_ptr = dma_alloc_coherent(dev,
112 sts_size * sizeof(struct tsi721_dma_sts), 112 sts_size * sizeof(struct tsi721_dma_sts),
113 &sts_phys, GFP_ATOMIC); 113 &sts_phys, GFP_ATOMIC);
114 if (!sts_ptr) { 114 if (!sts_ptr) {
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 183fc42a510a..2d7cd344f3bf 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
153 const bool * ctx, 153 const bool * ctx,
154 struct irq_affinity *desc) 154 struct irq_affinity *desc)
155{ 155{
156 int i, ret; 156 int i, ret, queue_idx = 0;
157 157
158 for (i = 0; i < nvqs; ++i) { 158 for (i = 0; i < nvqs; ++i) {
159 vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], 159 if (!names[i]) {
160 vqs[i] = NULL;
161 continue;
162 }
163
164 vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
160 ctx ? ctx[i] : false); 165 ctx ? ctx[i] : false);
161 if (IS_ERR(vqs[i])) { 166 if (IS_ERR(vqs[i])) {
162 ret = PTR_ERR(vqs[i]); 167 ret = PTR_ERR(vqs[i]);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index c21da9fe51ec..2e01bd833ffd 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -109,7 +109,7 @@ config RESET_QCOM_PDC
109 109
110config RESET_SIMPLE 110config RESET_SIMPLE
111 bool "Simple Reset Controller Driver" if COMPILE_TEST 111 bool "Simple Reset Controller Driver" if COMPILE_TEST
112 default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED 112 default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
113 help 113 help
114 This enables a simple reset controller driver for reset lines that 114 This enables a simple reset controller driver for reset lines that
115 that can be asserted and deasserted by toggling bits in a contiguous, 115 that can be asserted and deasserted by toggling bits in a contiguous,
@@ -128,6 +128,14 @@ config RESET_STM32MP157
128 help 128 help
129 This enables the RCC reset controller driver for STM32 MPUs. 129 This enables the RCC reset controller driver for STM32 MPUs.
130 130
131config RESET_SOCFPGA
132 bool "SoCFPGA Reset Driver" if COMPILE_TEST && !ARCH_SOCFPGA
133 default ARCH_SOCFPGA
134 select RESET_SIMPLE
135 help
136 This enables the reset driver for the SoCFPGA ARMv7 platforms. This
137 driver gets initialized early during platform init calls.
138
131config RESET_SUNXI 139config RESET_SUNXI
132 bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI 140 bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
133 default ARCH_SUNXI 141 default ARCH_SUNXI
@@ -163,15 +171,15 @@ config RESET_UNIPHIER
163 Say Y if you want to control reset signals provided by System Control 171 Say Y if you want to control reset signals provided by System Control
164 block, Media I/O block, Peripheral Block. 172 block, Media I/O block, Peripheral Block.
165 173
166config RESET_UNIPHIER_USB3 174config RESET_UNIPHIER_GLUE
167 tristate "USB3 reset driver for UniPhier SoCs" 175 tristate "Reset driver in glue layer for UniPhier SoCs"
168 depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF 176 depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
169 default ARCH_UNIPHIER 177 default ARCH_UNIPHIER
170 select RESET_SIMPLE 178 select RESET_SIMPLE
171 help 179 help
172 Support for the USB3 core reset on UniPhier SoCs. 180 Support for peripheral core reset included in its own glue layer
173 Say Y if you want to control reset signals provided by 181 on UniPhier SoCs. Say Y if you want to control reset signals
174 USB3 glue layer. 182 provided by the glue layer.
175 183
176config RESET_ZYNQ 184config RESET_ZYNQ
177 bool "ZYNQ Reset Driver" if COMPILE_TEST 185 bool "ZYNQ Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index d08e8b90046a..dc7874df78d9 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -19,10 +19,11 @@ obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
19obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o 19obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
20obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o 20obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
21obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o 21obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o
22obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
22obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o 23obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
23obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o 24obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
24obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o 25obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
25obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o 26obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
26obj-$(CONFIG_RESET_UNIPHIER_USB3) += reset-uniphier-usb3.o 27obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
27obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o 28obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
28 29
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index d1887c0ed5d3..9582efb70025 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -795,3 +795,45 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
795 return rstc; 795 return rstc;
796} 796}
797EXPORT_SYMBOL_GPL(devm_reset_control_array_get); 797EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
798
799static int reset_control_get_count_from_lookup(struct device *dev)
800{
801 const struct reset_control_lookup *lookup;
802 const char *dev_id;
803 int count = 0;
804
805 if (!dev)
806 return -EINVAL;
807
808 dev_id = dev_name(dev);
809 mutex_lock(&reset_lookup_mutex);
810
811 list_for_each_entry(lookup, &reset_lookup_list, list) {
812 if (!strcmp(lookup->dev_id, dev_id))
813 count++;
814 }
815
816 mutex_unlock(&reset_lookup_mutex);
817
818 if (count == 0)
819 count = -ENOENT;
820
821 return count;
822}
823
824/**
825 * reset_control_get_count - Count number of resets available with a device
826 *
827 * @dev: device for which to return the number of resets
828 *
829 * Returns positive reset count on success, or error number on failure and
830 * on count being zero.
831 */
832int reset_control_get_count(struct device *dev)
833{
834 if (dev->of_node)
835 return of_reset_control_get_count(dev->of_node);
836
837 return reset_control_get_count_from_lookup(dev);
838}
839EXPORT_SYMBOL_GPL(reset_control_get_count);
diff --git a/drivers/reset/reset-hsdk.c b/drivers/reset/reset-hsdk.c
index 8bce391c6943..4c7b8647b49c 100644
--- a/drivers/reset/reset-hsdk.c
+++ b/drivers/reset/reset-hsdk.c
@@ -86,6 +86,7 @@ static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
86 86
87static const struct reset_control_ops hsdk_reset_ops = { 87static const struct reset_control_ops hsdk_reset_ops = {
88 .reset = hsdk_reset_reset, 88 .reset = hsdk_reset_reset,
89 .deassert = hsdk_reset_reset,
89}; 90};
90 91
91static int hsdk_reset_probe(struct platform_device *pdev) 92static int hsdk_reset_probe(struct platform_device *pdev)
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index a91107fc9e27..77fbba3100c8 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -109,7 +109,7 @@ struct reset_simple_devdata {
109#define SOCFPGA_NR_BANKS 8 109#define SOCFPGA_NR_BANKS 8
110 110
111static const struct reset_simple_devdata reset_simple_socfpga = { 111static const struct reset_simple_devdata reset_simple_socfpga = {
112 .reg_offset = 0x10, 112 .reg_offset = 0x20,
113 .nr_resets = SOCFPGA_NR_BANKS * 32, 113 .nr_resets = SOCFPGA_NR_BANKS * 32,
114 .status_active_low = true, 114 .status_active_low = true,
115}; 115};
@@ -120,7 +120,8 @@ static const struct reset_simple_devdata reset_simple_active_low = {
120}; 120};
121 121
122static const struct of_device_id reset_simple_dt_ids[] = { 122static const struct of_device_id reset_simple_dt_ids[] = {
123 { .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga }, 123 { .compatible = "altr,stratix10-rst-mgr",
124 .data = &reset_simple_socfpga },
124 { .compatible = "st,stm32-rcc", }, 125 { .compatible = "st,stm32-rcc", },
125 { .compatible = "allwinner,sun6i-a31-clock-reset", 126 { .compatible = "allwinner,sun6i-a31-clock-reset",
126 .data = &reset_simple_active_low }, 127 .data = &reset_simple_active_low },
@@ -166,14 +167,6 @@ static int reset_simple_probe(struct platform_device *pdev)
166 data->status_active_low = devdata->status_active_low; 167 data->status_active_low = devdata->status_active_low;
167 } 168 }
168 169
169 if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") &&
170 of_property_read_u32(dev->of_node, "altr,modrst-offset",
171 &reg_offset)) {
172 dev_warn(dev,
173 "missing altr,modrst-offset property, assuming 0x%x!\n",
174 reg_offset);
175 }
176
177 data->membase += reg_offset; 170 data->membase += reg_offset;
178 171
179 return devm_reset_controller_register(dev, &data->rcdev); 172 return devm_reset_controller_register(dev, &data->rcdev);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
new file mode 100644
index 000000000000..318cfc51c441
--- /dev/null
+++ b/drivers/reset/reset-socfpga.c
@@ -0,0 +1,88 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018, Intel Corporation
4 * Copied from reset-sunxi.c
5 */
6
7#include <linux/err.h>
8#include <linux/io.h>
9#include <linux/init.h>
10#include <linux/of.h>
11#include <linux/of_address.h>
12#include <linux/platform_device.h>
13#include <linux/reset-controller.h>
14#include <linux/slab.h>
15#include <linux/spinlock.h>
16#include <linux/types.h>
17
18#include "reset-simple.h"
19
20#define SOCFPGA_NR_BANKS 8
21void __init socfpga_reset_init(void);
22
23static int a10_reset_init(struct device_node *np)
24{
25 struct reset_simple_data *data;
26 struct resource res;
27 resource_size_t size;
28 int ret;
29 u32 reg_offset = 0x10;
30
31 data = kzalloc(sizeof(*data), GFP_KERNEL);
32 if (!data)
33 return -ENOMEM;
34
35 ret = of_address_to_resource(np, 0, &res);
36 if (ret)
37 goto err_alloc;
38
39 size = resource_size(&res);
40 if (!request_mem_region(res.start, size, np->name)) {
41 ret = -EBUSY;
42 goto err_alloc;
43 }
44
45 data->membase = ioremap(res.start, size);
46 if (!data->membase) {
47 ret = -ENOMEM;
48 goto err_alloc;
49 }
50
51 if (of_property_read_u32(np, "altr,modrst-offset", &reg_offset))
52 pr_warn("missing altr,modrst-offset property, assuming 0x10\n");
53 data->membase += reg_offset;
54
55 spin_lock_init(&data->lock);
56
57 data->rcdev.owner = THIS_MODULE;
58 data->rcdev.nr_resets = SOCFPGA_NR_BANKS * 32;
59 data->rcdev.ops = &reset_simple_ops;
60 data->rcdev.of_node = np;
61 data->status_active_low = true;
62
63 return reset_controller_register(&data->rcdev);
64
65err_alloc:
66 kfree(data);
67 return ret;
68};
69
70/*
71 * These are the reset controller we need to initialize early on in
72 * our system, before we can even think of using a regular device
73 * driver for it.
74 * The controllers that we can register through the regular device
75 * model are handled by the simple reset driver directly.
76 */
77static const struct of_device_id socfpga_early_reset_dt_ids[] __initconst = {
78 { .compatible = "altr,rst-mgr", },
79 { /* sentinel */ },
80};
81
82void __init socfpga_reset_init(void)
83{
84 struct device_node *np;
85
86 for_each_matching_node(np, socfpga_early_reset_dt_ids)
87 a10_reset_init(np);
88}
diff --git a/drivers/reset/reset-uniphier-usb3.c b/drivers/reset/reset-uniphier-glue.c
index ffa1b19b594d..a45923f4df6d 100644
--- a/drivers/reset/reset-uniphier-usb3.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2// 2//
3// reset-uniphier-usb3.c - USB3 reset driver for UniPhier 3// reset-uniphier-glue.c - Glue layer reset driver for UniPhier
4// Copyright 2018 Socionext Inc. 4// Copyright 2018 Socionext Inc.
5// Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com> 5// Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
6 6
@@ -15,24 +15,24 @@
15#define MAX_CLKS 2 15#define MAX_CLKS 2
16#define MAX_RSTS 2 16#define MAX_RSTS 2
17 17
18struct uniphier_usb3_reset_soc_data { 18struct uniphier_glue_reset_soc_data {
19 int nclks; 19 int nclks;
20 const char * const *clock_names; 20 const char * const *clock_names;
21 int nrsts; 21 int nrsts;
22 const char * const *reset_names; 22 const char * const *reset_names;
23}; 23};
24 24
25struct uniphier_usb3_reset_priv { 25struct uniphier_glue_reset_priv {
26 struct clk_bulk_data clk[MAX_CLKS]; 26 struct clk_bulk_data clk[MAX_CLKS];
27 struct reset_control *rst[MAX_RSTS]; 27 struct reset_control *rst[MAX_RSTS];
28 struct reset_simple_data rdata; 28 struct reset_simple_data rdata;
29 const struct uniphier_usb3_reset_soc_data *data; 29 const struct uniphier_glue_reset_soc_data *data;
30}; 30};
31 31
32static int uniphier_usb3_reset_probe(struct platform_device *pdev) 32static int uniphier_glue_reset_probe(struct platform_device *pdev)
33{ 33{
34 struct device *dev = &pdev->dev; 34 struct device *dev = &pdev->dev;
35 struct uniphier_usb3_reset_priv *priv; 35 struct uniphier_glue_reset_priv *priv;
36 struct resource *res; 36 struct resource *res;
37 resource_size_t size; 37 resource_size_t size;
38 const char *name; 38 const char *name;
@@ -100,9 +100,9 @@ out_rst_assert:
100 return ret; 100 return ret;
101} 101}
102 102
103static int uniphier_usb3_reset_remove(struct platform_device *pdev) 103static int uniphier_glue_reset_remove(struct platform_device *pdev)
104{ 104{
105 struct uniphier_usb3_reset_priv *priv = platform_get_drvdata(pdev); 105 struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev);
106 int i; 106 int i;
107 107
108 for (i = 0; i < priv->data->nrsts; i++) 108 for (i = 0; i < priv->data->nrsts; i++)
@@ -117,7 +117,7 @@ static const char * const uniphier_pro4_clock_reset_names[] = {
117 "gio", "link", 117 "gio", "link",
118}; 118};
119 119
120static const struct uniphier_usb3_reset_soc_data uniphier_pro4_data = { 120static const struct uniphier_glue_reset_soc_data uniphier_pro4_data = {
121 .nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names), 121 .nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
122 .clock_names = uniphier_pro4_clock_reset_names, 122 .clock_names = uniphier_pro4_clock_reset_names,
123 .nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names), 123 .nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
@@ -128,14 +128,14 @@ static const char * const uniphier_pxs2_clock_reset_names[] = {
128 "link", 128 "link",
129}; 129};
130 130
131static const struct uniphier_usb3_reset_soc_data uniphier_pxs2_data = { 131static const struct uniphier_glue_reset_soc_data uniphier_pxs2_data = {
132 .nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names), 132 .nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
133 .clock_names = uniphier_pxs2_clock_reset_names, 133 .clock_names = uniphier_pxs2_clock_reset_names,
134 .nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names), 134 .nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
135 .reset_names = uniphier_pxs2_clock_reset_names, 135 .reset_names = uniphier_pxs2_clock_reset_names,
136}; 136};
137 137
138static const struct of_device_id uniphier_usb3_reset_match[] = { 138static const struct of_device_id uniphier_glue_reset_match[] = {
139 { 139 {
140 .compatible = "socionext,uniphier-pro4-usb3-reset", 140 .compatible = "socionext,uniphier-pro4-usb3-reset",
141 .data = &uniphier_pro4_data, 141 .data = &uniphier_pro4_data,
@@ -152,20 +152,32 @@ static const struct of_device_id uniphier_usb3_reset_match[] = {
152 .compatible = "socionext,uniphier-pxs3-usb3-reset", 152 .compatible = "socionext,uniphier-pxs3-usb3-reset",
153 .data = &uniphier_pxs2_data, 153 .data = &uniphier_pxs2_data,
154 }, 154 },
155 {
156 .compatible = "socionext,uniphier-pro4-ahci-reset",
157 .data = &uniphier_pro4_data,
158 },
159 {
160 .compatible = "socionext,uniphier-pxs2-ahci-reset",
161 .data = &uniphier_pxs2_data,
162 },
163 {
164 .compatible = "socionext,uniphier-pxs3-ahci-reset",
165 .data = &uniphier_pxs2_data,
166 },
155 { /* Sentinel */ } 167 { /* Sentinel */ }
156}; 168};
157MODULE_DEVICE_TABLE(of, uniphier_usb3_reset_match); 169MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match);
158 170
159static struct platform_driver uniphier_usb3_reset_driver = { 171static struct platform_driver uniphier_glue_reset_driver = {
160 .probe = uniphier_usb3_reset_probe, 172 .probe = uniphier_glue_reset_probe,
161 .remove = uniphier_usb3_reset_remove, 173 .remove = uniphier_glue_reset_remove,
162 .driver = { 174 .driver = {
163 .name = "uniphier-usb3-reset", 175 .name = "uniphier-glue-reset",
164 .of_match_table = uniphier_usb3_reset_match, 176 .of_match_table = uniphier_glue_reset_match,
165 }, 177 },
166}; 178};
167module_platform_driver(uniphier_usb3_reset_driver); 179module_platform_driver(uniphier_glue_reset_driver);
168 180
169MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>"); 181MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
170MODULE_DESCRIPTION("UniPhier USB3 Reset Driver"); 182MODULE_DESCRIPTION("UniPhier Glue layer reset driver");
171MODULE_LICENSE("GPL"); 183MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 4e7b55a14b1a..6e294b4d3635 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
4469 usrparm.psf_data &= 0x7fffffffULL; 4469 usrparm.psf_data &= 0x7fffffffULL;
4470 usrparm.rssd_result &= 0x7fffffffULL; 4470 usrparm.rssd_result &= 0x7fffffffULL;
4471 } 4471 }
4472 /* at least 2 bytes are accessed and should be allocated */
4473 if (usrparm.psf_data_len < 2) {
4474 DBF_DEV_EVENT(DBF_WARNING, device,
4475 "Symmetrix ioctl invalid data length %d",
4476 usrparm.psf_data_len);
4477 rc = -EINVAL;
4478 goto out;
4479 }
4472 /* alloc I/O data area */ 4480 /* alloc I/O data area */
4473 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 4481 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
4474 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 4482 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 194ffd5c8580..039b2074db7e 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
60 60
61static void __ref sclp_cpu_change_notify(struct work_struct *work) 61static void __ref sclp_cpu_change_notify(struct work_struct *work)
62{ 62{
63 lock_device_hotplug();
63 smp_rescan_cpus(); 64 smp_rescan_cpus();
65 unlock_device_hotplug();
64} 66}
65 67
66static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) 68static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 48ea0004a56d..5a699746c357 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
248static inline int ap_test_config_card_id(unsigned int id) 248static inline int ap_test_config_card_id(unsigned int id)
249{ 249{
250 if (!ap_configuration) /* QCI not supported */ 250 if (!ap_configuration) /* QCI not supported */
251 return 1; 251 /* only ids 0...3F may be probed */
252 return id < 0x40 ? 1 : 0;
252 return ap_test_config(ap_configuration->apm, id); 253 return ap_test_config(ap_configuration->apm, id);
253} 254}
254 255
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index dcbf5c857743..ed8e58f09054 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -89,8 +89,8 @@ static int register_sba(struct ism_dev *ism)
89 dma_addr_t dma_handle; 89 dma_addr_t dma_handle;
90 struct ism_sba *sba; 90 struct ism_sba *sba;
91 91
92 sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, 92 sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
93 &dma_handle, GFP_KERNEL); 93 GFP_KERNEL);
94 if (!sba) 94 if (!sba)
95 return -ENOMEM; 95 return -ENOMEM;
96 96
@@ -116,8 +116,8 @@ static int register_ieq(struct ism_dev *ism)
116 dma_addr_t dma_handle; 116 dma_addr_t dma_handle;
117 struct ism_eq *ieq; 117 struct ism_eq *ieq;
118 118
119 ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, 119 ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
120 &dma_handle, GFP_KERNEL); 120 GFP_KERNEL);
121 if (!ieq) 121 if (!ieq)
122 return -ENOMEM; 122 return -ENOMEM;
123 123
@@ -234,10 +234,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
234 test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) 234 test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
235 return -EINVAL; 235 return -EINVAL;
236 236
237 dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len, 237 dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
238 &dmb->dma_addr, GFP_KERNEL | 238 &dmb->dma_addr,
239 __GFP_NOWARN | __GFP_NOMEMALLOC | 239 GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
240 __GFP_COMP | __GFP_NORETRY);
241 if (!dmb->cpu_addr) 240 if (!dmb->cpu_addr)
242 clear_bit(dmb->sba_idx, ism->sba_bitmap); 241 clear_bit(dmb->sba_idx, ism->sba_bitmap);
243 242
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 0ee026947f20..122059ecad84 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -22,6 +22,7 @@
22#include <linux/hashtable.h> 22#include <linux/hashtable.h>
23#include <linux/ip.h> 23#include <linux/ip.h>
24#include <linux/refcount.h> 24#include <linux/refcount.h>
25#include <linux/workqueue.h>
25 26
26#include <net/ipv6.h> 27#include <net/ipv6.h>
27#include <net/if_inet6.h> 28#include <net/if_inet6.h>
@@ -789,6 +790,7 @@ struct qeth_card {
789 struct qeth_seqno seqno; 790 struct qeth_seqno seqno;
790 struct qeth_card_options options; 791 struct qeth_card_options options;
791 792
793 struct workqueue_struct *event_wq;
792 wait_queue_head_t wait_q; 794 wait_queue_head_t wait_q;
793 spinlock_t mclock; 795 spinlock_t mclock;
794 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 796 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -962,7 +964,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[];
962extern const struct attribute_group qeth_device_attr_group; 964extern const struct attribute_group qeth_device_attr_group;
963extern const struct attribute_group qeth_device_blkt_group; 965extern const struct attribute_group qeth_device_blkt_group;
964extern const struct device_type qeth_generic_devtype; 966extern const struct device_type qeth_generic_devtype;
965extern struct workqueue_struct *qeth_wq;
966 967
967int qeth_card_hw_is_reachable(struct qeth_card *); 968int qeth_card_hw_is_reachable(struct qeth_card *);
968const char *qeth_get_cardname_short(struct qeth_card *); 969const char *qeth_get_cardname_short(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e63e03143ca7..89f912213e62 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
74static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); 74static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
75static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); 75static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
76 76
77struct workqueue_struct *qeth_wq; 77static struct workqueue_struct *qeth_wq;
78EXPORT_SYMBOL_GPL(qeth_wq);
79 78
80int qeth_card_hw_is_reachable(struct qeth_card *card) 79int qeth_card_hw_is_reachable(struct qeth_card *card)
81{ 80{
@@ -566,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
566 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", 565 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
567 rc, CARD_DEVID(card)); 566 rc, CARD_DEVID(card));
568 atomic_set(&channel->irq_pending, 0); 567 atomic_set(&channel->irq_pending, 0);
568 qeth_release_buffer(channel, iob);
569 card->read_or_write_problem = 1; 569 card->read_or_write_problem = 1;
570 qeth_schedule_recovery(card); 570 qeth_schedule_recovery(card);
571 wake_up(&card->wait_q); 571 wake_up(&card->wait_q);
@@ -1127,6 +1127,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1127 rc = qeth_get_problem(card, cdev, irb); 1127 rc = qeth_get_problem(card, cdev, irb);
1128 if (rc) { 1128 if (rc) {
1129 card->read_or_write_problem = 1; 1129 card->read_or_write_problem = 1;
1130 if (iob)
1131 qeth_release_buffer(iob->channel, iob);
1130 qeth_clear_ipacmd_list(card); 1132 qeth_clear_ipacmd_list(card);
1131 qeth_schedule_recovery(card); 1133 qeth_schedule_recovery(card);
1132 goto out; 1134 goto out;
@@ -1466,6 +1468,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1466 CARD_RDEV(card) = gdev->cdev[0]; 1468 CARD_RDEV(card) = gdev->cdev[0];
1467 CARD_WDEV(card) = gdev->cdev[1]; 1469 CARD_WDEV(card) = gdev->cdev[1];
1468 CARD_DDEV(card) = gdev->cdev[2]; 1470 CARD_DDEV(card) = gdev->cdev[2];
1471
1472 card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
1473 if (!card->event_wq)
1474 goto out_wq;
1469 if (qeth_setup_channel(&card->read, true)) 1475 if (qeth_setup_channel(&card->read, true))
1470 goto out_ip; 1476 goto out_ip;
1471 if (qeth_setup_channel(&card->write, true)) 1477 if (qeth_setup_channel(&card->write, true))
@@ -1481,6 +1487,8 @@ out_data:
1481out_channel: 1487out_channel:
1482 qeth_clean_channel(&card->read); 1488 qeth_clean_channel(&card->read);
1483out_ip: 1489out_ip:
1490 destroy_workqueue(card->event_wq);
1491out_wq:
1484 dev_set_drvdata(&gdev->dev, NULL); 1492 dev_set_drvdata(&gdev->dev, NULL);
1485 kfree(card); 1493 kfree(card);
1486out: 1494out:
@@ -1809,6 +1817,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card,
1809 QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); 1817 QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
1810 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 1818 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
1811 atomic_set(&channel->irq_pending, 0); 1819 atomic_set(&channel->irq_pending, 0);
1820 qeth_release_buffer(channel, iob);
1812 wake_up(&card->wait_q); 1821 wake_up(&card->wait_q);
1813 return rc; 1822 return rc;
1814 } 1823 }
@@ -1878,6 +1887,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
1878 rc); 1887 rc);
1879 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 1888 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
1880 atomic_set(&channel->irq_pending, 0); 1889 atomic_set(&channel->irq_pending, 0);
1890 qeth_release_buffer(channel, iob);
1881 wake_up(&card->wait_q); 1891 wake_up(&card->wait_q);
1882 return rc; 1892 return rc;
1883 } 1893 }
@@ -2058,6 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2058 } 2068 }
2059 reply = qeth_alloc_reply(card); 2069 reply = qeth_alloc_reply(card);
2060 if (!reply) { 2070 if (!reply) {
2071 qeth_release_buffer(channel, iob);
2061 return -ENOMEM; 2072 return -ENOMEM;
2062 } 2073 }
2063 reply->callback = reply_cb; 2074 reply->callback = reply_cb;
@@ -2389,11 +2400,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2389 return 0; 2400 return 0;
2390} 2401}
2391 2402
2392static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) 2403static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2393{ 2404{
2394 if (!q) 2405 if (!q)
2395 return; 2406 return;
2396 2407
2408 qeth_clear_outq_buffers(q, 1);
2397 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2409 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2398 kfree(q); 2410 kfree(q);
2399} 2411}
@@ -2467,10 +2479,8 @@ out_freeoutqbufs:
2467 card->qdio.out_qs[i]->bufs[j] = NULL; 2479 card->qdio.out_qs[i]->bufs[j] = NULL;
2468 } 2480 }
2469out_freeoutq: 2481out_freeoutq:
2470 while (i > 0) { 2482 while (i > 0)
2471 qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); 2483 qeth_free_output_queue(card->qdio.out_qs[--i]);
2472 qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
2473 }
2474 kfree(card->qdio.out_qs); 2484 kfree(card->qdio.out_qs);
2475 card->qdio.out_qs = NULL; 2485 card->qdio.out_qs = NULL;
2476out_freepool: 2486out_freepool:
@@ -2503,10 +2513,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
2503 qeth_free_buffer_pool(card); 2513 qeth_free_buffer_pool(card);
2504 /* free outbound qdio_qs */ 2514 /* free outbound qdio_qs */
2505 if (card->qdio.out_qs) { 2515 if (card->qdio.out_qs) {
2506 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2516 for (i = 0; i < card->qdio.no_out_queues; i++)
2507 qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); 2517 qeth_free_output_queue(card->qdio.out_qs[i]);
2508 qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
2509 }
2510 kfree(card->qdio.out_qs); 2518 kfree(card->qdio.out_qs);
2511 card->qdio.out_qs = NULL; 2519 card->qdio.out_qs = NULL;
2512 } 2520 }
@@ -5028,6 +5036,7 @@ static void qeth_core_free_card(struct qeth_card *card)
5028 qeth_clean_channel(&card->read); 5036 qeth_clean_channel(&card->read);
5029 qeth_clean_channel(&card->write); 5037 qeth_clean_channel(&card->write);
5030 qeth_clean_channel(&card->data); 5038 qeth_clean_channel(&card->data);
5039 destroy_workqueue(card->event_wq);
5031 qeth_free_qdio_buffers(card); 5040 qeth_free_qdio_buffers(card);
5032 unregister_service_level(&card->qeth_service_level); 5041 unregister_service_level(&card->qeth_service_level);
5033 dev_set_drvdata(&card->gdev->dev, NULL); 5042 dev_set_drvdata(&card->gdev->dev, NULL);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f108d4b44605..a43de2f9bcac 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -369,6 +369,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
369 qeth_clear_cmd_buffers(&card->read); 369 qeth_clear_cmd_buffers(&card->read);
370 qeth_clear_cmd_buffers(&card->write); 370 qeth_clear_cmd_buffers(&card->write);
371 } 371 }
372
373 flush_workqueue(card->event_wq);
372} 374}
373 375
374static int qeth_l2_process_inbound_buffer(struct qeth_card *card, 376static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
@@ -801,6 +803,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
801 803
802 if (cgdev->state == CCWGROUP_ONLINE) 804 if (cgdev->state == CCWGROUP_ONLINE)
803 qeth_l2_set_offline(cgdev); 805 qeth_l2_set_offline(cgdev);
806
807 cancel_work_sync(&card->close_dev_work);
804 if (qeth_netdev_is_registered(card->dev)) 808 if (qeth_netdev_is_registered(card->dev))
805 unregister_netdev(card->dev); 809 unregister_netdev(card->dev);
806} 810}
@@ -1434,7 +1438,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
1434 data->card = card; 1438 data->card = card;
1435 memcpy(&data->qports, qports, 1439 memcpy(&data->qports, qports,
1436 sizeof(struct qeth_sbp_state_change) + extrasize); 1440 sizeof(struct qeth_sbp_state_change) + extrasize);
1437 queue_work(qeth_wq, &data->worker); 1441 queue_work(card->event_wq, &data->worker);
1438} 1442}
1439 1443
1440struct qeth_bridge_host_data { 1444struct qeth_bridge_host_data {
@@ -1506,7 +1510,7 @@ static void qeth_bridge_host_event(struct qeth_card *card,
1506 data->card = card; 1510 data->card = card;
1507 memcpy(&data->hostevs, hostevs, 1511 memcpy(&data->hostevs, hostevs,
1508 sizeof(struct qeth_ipacmd_addr_change) + extrasize); 1512 sizeof(struct qeth_ipacmd_addr_change) + extrasize);
1509 queue_work(qeth_wq, &data->worker); 1513 queue_work(card->event_wq, &data->worker);
1510} 1514}
1511 1515
1512/* SETBRIDGEPORT support; sending commands */ 1516/* SETBRIDGEPORT support; sending commands */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 42a7cdc59b76..df34bff4ac31 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1433,6 +1433,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
1433 qeth_clear_cmd_buffers(&card->read); 1433 qeth_clear_cmd_buffers(&card->read);
1434 qeth_clear_cmd_buffers(&card->write); 1434 qeth_clear_cmd_buffers(&card->write);
1435 } 1435 }
1436
1437 flush_workqueue(card->event_wq);
1436} 1438}
1437 1439
1438/* 1440/*
@@ -2338,6 +2340,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2338 if (cgdev->state == CCWGROUP_ONLINE) 2340 if (cgdev->state == CCWGROUP_ONLINE)
2339 qeth_l3_set_offline(cgdev); 2341 qeth_l3_set_offline(cgdev);
2340 2342
2343 cancel_work_sync(&card->close_dev_work);
2341 if (qeth_netdev_is_registered(card->dev)) 2344 if (qeth_netdev_is_registered(card->dev))
2342 unregister_netdev(card->dev); 2345 unregister_netdev(card->dev);
2343 qeth_l3_clear_ip_htable(card, 0); 2346 qeth_l3_clear_ip_htable(card, 0);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cf30d124b9e..e390f8c6d5f3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
403 goto failed; 403 goto failed;
404 404
405 /* report size limit per scatter-gather segment */ 405 /* report size limit per scatter-gather segment */
406 adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
407 adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; 406 adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
408 407
409 adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; 408 adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 00acc7144bbc..f4f6a07c5222 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
428 .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) 428 .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
429 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, 429 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
430 /* GCD, adjusted later */ 430 /* GCD, adjusted later */
431 /* report size limit per scatter-gather segment */
432 .max_segment_size = ZFCP_QDIO_SBALE_LEN,
431 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, 433 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
432 .shost_attrs = zfcp_sysfs_shost_attrs, 434 .shost_attrs = zfcp_sysfs_shost_attrs,
433 .sdev_attrs = zfcp_sysfs_sdev_attrs, 435 .sdev_attrs = zfcp_sysfs_sdev_attrs,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index fc9dbad476c0..ae1d56da671d 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
635{ 635{
636 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 636 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
637 unsigned long *indicatorp = NULL; 637 unsigned long *indicatorp = NULL;
638 int ret, i; 638 int ret, i, queue_idx = 0;
639 struct ccw1 *ccw; 639 struct ccw1 *ccw;
640 640
641 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 641 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
643 return -ENOMEM; 643 return -ENOMEM;
644 644
645 for (i = 0; i < nvqs; ++i) { 645 for (i = 0; i < nvqs; ++i) {
646 vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], 646 if (!names[i]) {
647 ctx ? ctx[i] : false, ccw); 647 vqs[i] = NULL;
648 continue;
649 }
650
651 vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
652 names[i], ctx ? ctx[i] : false,
653 ccw);
648 if (IS_ERR(vqs[i])) { 654 if (IS_ERR(vqs[i])) {
649 ret = PTR_ERR(vqs[i]); 655 ret = PTR_ERR(vqs[i]);
650 vqs[i] = NULL; 656 vqs[i] = NULL;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index e8f5f7c63190..cd096104bcec 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -646,8 +646,9 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
646 unsigned long *cpu_addr; 646 unsigned long *cpu_addr;
647 int retval = 1; 647 int retval = 1;
648 648
649 cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev, 649 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
650 size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL); 650 size * TW_Q_LENGTH, &dma_handle,
651 GFP_KERNEL);
651 if (!cpu_addr) { 652 if (!cpu_addr) {
652 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); 653 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
653 goto out; 654 goto out;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 128d658d472a..16957d7ac414 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
295 if(tpnt->sdev_attrs == NULL) 295 if(tpnt->sdev_attrs == NULL)
296 tpnt->sdev_attrs = NCR_700_dev_attrs; 296 tpnt->sdev_attrs = NCR_700_dev_attrs;
297 297
298 memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, 298 memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
299 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); 299 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
300 if(memory == NULL) { 300 if(memory == NULL) {
301 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); 301 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index ff53fd0d12f2..66c514310f3c 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1123,8 +1123,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
1123 1123
1124 /* Get total memory needed for SCB */ 1124 /* Get total memory needed for SCB */
1125 sz = ORC_MAXQUEUE * sizeof(struct orc_scb); 1125 sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
1126 host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys, 1126 host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys,
1127 GFP_KERNEL); 1127 GFP_KERNEL);
1128 if (!host->scb_virt) { 1128 if (!host->scb_virt) {
1129 printk("inia100: SCB memory allocation error\n"); 1129 printk("inia100: SCB memory allocation error\n");
1130 goto out_host_put; 1130 goto out_host_put;
@@ -1132,8 +1132,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
1132 1132
1133 /* Get total memory needed for ESCB */ 1133 /* Get total memory needed for ESCB */
1134 sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); 1134 sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
1135 host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys, 1135 host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys,
1136 GFP_KERNEL); 1136 GFP_KERNEL);
1137 if (!host->escb_virt) { 1137 if (!host->escb_virt) {
1138 printk("inia100: ESCB memory allocation error\n"); 1138 printk("inia100: ESCB memory allocation error\n");
1139 goto out_free_scb_array; 1139 goto out_free_scb_array;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 634ddb90e7aa..7e56a11836c1 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1747,11 +1747,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1747 shost->max_sectors = (shost->sg_tablesize * 8) + 112; 1747 shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1748 } 1748 }
1749 1749
1750 error = dma_set_max_seg_size(&pdev->dev, 1750 if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
1751 (aac->adapter_info.options & AAC_OPT_NEW_COMM) ? 1751 shost->max_segment_size = shost->max_sectors << 9;
1752 (shost->max_sectors << 9) : 65536); 1752 else
1753 if (error) 1753 shost->max_segment_size = 65536;
1754 goto out_deinit;
1755 1754
1756 /* 1755 /*
1757 * Firmware printf works only with older firmware. 1756 * Firmware printf works only with older firmware.
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index f83f79b07b50..07efcb9b5b94 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -280,7 +280,7 @@ static ssize_t asd_show_dev_rev(struct device *dev,
280 return snprintf(buf, PAGE_SIZE, "%s\n", 280 return snprintf(buf, PAGE_SIZE, "%s\n",
281 asd_dev_rev[asd_ha->revision_id]); 281 asd_dev_rev[asd_ha->revision_id]);
282} 282}
283static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); 283static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
284 284
285static ssize_t asd_show_dev_bios_build(struct device *dev, 285static ssize_t asd_show_dev_bios_build(struct device *dev,
286 struct device_attribute *attr,char *buf) 286 struct device_attribute *attr,char *buf)
@@ -477,7 +477,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
477{ 477{
478 int err; 478 int err;
479 479
480 err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); 480 err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
481 if (err) 481 if (err)
482 return err; 482 return err;
483 483
@@ -499,13 +499,13 @@ err_update_bios:
499err_biosb: 499err_biosb:
500 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); 500 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
501err_rev: 501err_rev:
502 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); 502 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
503 return err; 503 return err;
504} 504}
505 505
506static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) 506static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
507{ 507{
508 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); 508 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
509 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); 509 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
510 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); 510 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
511 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); 511 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 0f6751b0a633..57c6fa388bf6 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -587,8 +587,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
587 case ACB_ADAPTER_TYPE_B: { 587 case ACB_ADAPTER_TYPE_B: {
588 struct MessageUnit_B *reg; 588 struct MessageUnit_B *reg;
589 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); 589 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
590 dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 590 dma_coherent = dma_alloc_coherent(&pdev->dev,
591 &dma_coherent_handle, GFP_KERNEL); 591 acb->roundup_ccbsize,
592 &dma_coherent_handle,
593 GFP_KERNEL);
592 if (!dma_coherent) { 594 if (!dma_coherent) {
593 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 595 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
594 return false; 596 return false;
@@ -617,8 +619,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
617 struct MessageUnit_D *reg; 619 struct MessageUnit_D *reg;
618 620
619 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); 621 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
620 dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 622 dma_coherent = dma_alloc_coherent(&pdev->dev,
621 &dma_coherent_handle, GFP_KERNEL); 623 acb->roundup_ccbsize,
624 &dma_coherent_handle,
625 GFP_KERNEL);
622 if (!dma_coherent) { 626 if (!dma_coherent) {
623 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 627 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
624 return false; 628 return false;
@@ -659,8 +663,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
659 uint32_t completeQ_size; 663 uint32_t completeQ_size;
660 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; 664 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
661 acb->roundup_ccbsize = roundup(completeQ_size, 32); 665 acb->roundup_ccbsize = roundup(completeQ_size, 32);
662 dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 666 dma_coherent = dma_alloc_coherent(&pdev->dev,
663 &dma_coherent_handle, GFP_KERNEL); 667 acb->roundup_ccbsize,
668 &dma_coherent_handle,
669 GFP_KERNEL);
664 if (!dma_coherent){ 670 if (!dma_coherent){
665 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 671 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
666 return false; 672 return false;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 39f3820572b4..74e260027c7d 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3321,8 +3321,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3321 q->len = len; 3321 q->len = len;
3322 q->entry_size = entry_size; 3322 q->entry_size = entry_size;
3323 mem->size = len * entry_size; 3323 mem->size = len * entry_size;
3324 mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, 3324 mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
3325 GFP_KERNEL); 3325 GFP_KERNEL);
3326 if (!mem->va) 3326 if (!mem->va)
3327 return -ENOMEM; 3327 return -ENOMEM;
3328 return 0; 3328 return 0;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index ca7b7bbc8371..d4febaadfaa3 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -293,8 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
293 struct be_dma_mem *cmd, 293 struct be_dma_mem *cmd,
294 u8 subsystem, u8 opcode, u32 size) 294 u8 subsystem, u8 opcode, u32 size)
295{ 295{
296 cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, 296 cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
297 GFP_KERNEL); 297 GFP_KERNEL);
298 if (!cmd->va) { 298 if (!cmd->va) {
299 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 299 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
300 "BG_%d : Failed to allocate memory for if info\n"); 300 "BG_%d : Failed to allocate memory for if info\n");
@@ -1510,10 +1510,9 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
1510 return -EINVAL; 1510 return -EINVAL;
1511 1511
1512 nonemb_cmd.size = sizeof(union be_invldt_cmds_params); 1512 nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
1513 nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, 1513 nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
1514 nonemb_cmd.size, 1514 nonemb_cmd.size, &nonemb_cmd.dma,
1515 &nonemb_cmd.dma, 1515 GFP_KERNEL);
1516 GFP_KERNEL);
1517 if (!nonemb_cmd.va) { 1516 if (!nonemb_cmd.va) {
1518 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 1517 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
1519 "BM_%d : invldt_cmds_params alloc failed\n"); 1518 "BM_%d : invldt_cmds_params alloc failed\n");
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 5d163ca1b366..d8e6d7480f35 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3264,9 +3264,9 @@ bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
3264 /* Allocate dma coherent memory */ 3264 /* Allocate dma coherent memory */
3265 buf_info = buf_base; 3265 buf_info = buf_base;
3266 buf_info->size = payload_len; 3266 buf_info->size = payload_len;
3267 buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev, 3267 buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
3268 buf_info->size, &buf_info->phys, 3268 buf_info->size, &buf_info->phys,
3269 GFP_KERNEL); 3269 GFP_KERNEL);
3270 if (!buf_info->virt) 3270 if (!buf_info->virt)
3271 goto out_free_mem; 3271 goto out_free_mem;
3272 3272
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index e8ae4d671d23..039328d9ef13 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,10 +1857,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1857 * entries. Hence the limit with one page is 8192 task context 1857 * entries. Hence the limit with one page is 8192 task context
1858 * entries. 1858 * entries.
1859 */ 1859 */
1860 hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev, 1860 hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1861 PAGE_SIZE, 1861 PAGE_SIZE,
1862 &hba->task_ctx_bd_dma, 1862 &hba->task_ctx_bd_dma,
1863 GFP_KERNEL); 1863 GFP_KERNEL);
1864 if (!hba->task_ctx_bd_tbl) { 1864 if (!hba->task_ctx_bd_tbl) {
1865 printk(KERN_ERR PFX "unable to allocate task context BDT\n"); 1865 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1866 rc = -1; 1866 rc = -1;
@@ -1894,10 +1894,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1894 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; 1894 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1895 for (i = 0; i < task_ctx_arr_sz; i++) { 1895 for (i = 0; i < task_ctx_arr_sz; i++) {
1896 1896
1897 hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev, 1897 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1898 PAGE_SIZE, 1898 PAGE_SIZE,
1899 &hba->task_ctx_dma[i], 1899 &hba->task_ctx_dma[i],
1900 GFP_KERNEL); 1900 GFP_KERNEL);
1901 if (!hba->task_ctx[i]) { 1901 if (!hba->task_ctx[i]) {
1902 printk(KERN_ERR PFX "unable to alloc task context\n"); 1902 printk(KERN_ERR PFX "unable to alloc task context\n");
1903 rc = -1; 1903 rc = -1;
@@ -2031,19 +2031,19 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2031 } 2031 }
2032 2032
2033 for (i = 0; i < segment_count; ++i) { 2033 for (i = 0; i < segment_count; ++i) {
2034 hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev, 2034 hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
2035 BNX2FC_HASH_TBL_CHUNK_SIZE, 2035 BNX2FC_HASH_TBL_CHUNK_SIZE,
2036 &dma_segment_array[i], 2036 &dma_segment_array[i],
2037 GFP_KERNEL); 2037 GFP_KERNEL);
2038 if (!hba->hash_tbl_segments[i]) { 2038 if (!hba->hash_tbl_segments[i]) {
2039 printk(KERN_ERR PFX "hash segment alloc failed\n"); 2039 printk(KERN_ERR PFX "hash segment alloc failed\n");
2040 goto cleanup_dma; 2040 goto cleanup_dma;
2041 } 2041 }
2042 } 2042 }
2043 2043
2044 hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2044 hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2045 &hba->hash_tbl_pbl_dma, 2045 &hba->hash_tbl_pbl_dma,
2046 GFP_KERNEL); 2046 GFP_KERNEL);
2047 if (!hba->hash_tbl_pbl) { 2047 if (!hba->hash_tbl_pbl) {
2048 printk(KERN_ERR PFX "hash table pbl alloc failed\n"); 2048 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2049 goto cleanup_dma; 2049 goto cleanup_dma;
@@ -2104,10 +2104,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2104 return -ENOMEM; 2104 return -ENOMEM;
2105 2105
2106 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); 2106 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2107 hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev, 2107 hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2108 mem_size, 2108 &hba->t2_hash_tbl_ptr_dma,
2109 &hba->t2_hash_tbl_ptr_dma, 2109 GFP_KERNEL);
2110 GFP_KERNEL);
2111 if (!hba->t2_hash_tbl_ptr) { 2110 if (!hba->t2_hash_tbl_ptr) {
2112 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); 2111 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2113 bnx2fc_free_fw_resc(hba); 2112 bnx2fc_free_fw_resc(hba);
@@ -2116,9 +2115,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2116 2115
2117 mem_size = BNX2FC_NUM_MAX_SESS * 2116 mem_size = BNX2FC_NUM_MAX_SESS *
2118 sizeof(struct fcoe_t2_hash_table_entry); 2117 sizeof(struct fcoe_t2_hash_table_entry);
2119 hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size, 2118 hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2120 &hba->t2_hash_tbl_dma, 2119 &hba->t2_hash_tbl_dma,
2121 GFP_KERNEL); 2120 GFP_KERNEL);
2122 if (!hba->t2_hash_tbl) { 2121 if (!hba->t2_hash_tbl) {
2123 printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); 2122 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2124 bnx2fc_free_fw_resc(hba); 2123 bnx2fc_free_fw_resc(hba);
@@ -2140,9 +2139,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2140 return -ENOMEM; 2139 return -ENOMEM;
2141 } 2140 }
2142 2141
2143 hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2142 hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2144 &hba->stats_buf_dma, 2143 &hba->stats_buf_dma,
2145 GFP_KERNEL); 2144 GFP_KERNEL);
2146 if (!hba->stats_buffer) { 2145 if (!hba->stats_buffer) {
2147 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); 2146 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2148 bnx2fc_free_fw_resc(hba); 2147 bnx2fc_free_fw_resc(hba);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 350257c13a5b..bc9f2a2365f4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
240 return NULL; 240 return NULL;
241 } 241 }
242 242
243 cmgr->hba = hba;
243 cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), 244 cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
244 GFP_KERNEL); 245 GFP_KERNEL);
245 if (!cmgr->free_list) { 246 if (!cmgr->free_list) {
@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
256 goto mem_err; 257 goto mem_err;
257 } 258 }
258 259
259 cmgr->hba = hba;
260 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); 260 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
261 261
262 for (i = 0; i < arr_sz; i++) { 262 for (i = 0; i < arr_sz; i++) {
@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
295 295
296 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ 296 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
297 mem_size = num_ios * sizeof(struct io_bdt *); 297 mem_size = num_ios * sizeof(struct io_bdt *);
298 cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); 298 cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
299 if (!cmgr->io_bdt_pool) { 299 if (!cmgr->io_bdt_pool) {
300 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); 300 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
301 goto mem_err; 301 goto mem_err;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index e3d1c7c440c8..d735e87e416a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,8 +672,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
672 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & 672 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
673 CNIC_PAGE_MASK; 673 CNIC_PAGE_MASK;
674 674
675 tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 675 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
676 &tgt->sq_dma, GFP_KERNEL); 676 &tgt->sq_dma, GFP_KERNEL);
677 if (!tgt->sq) { 677 if (!tgt->sq) {
678 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", 678 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
679 tgt->sq_mem_size); 679 tgt->sq_mem_size);
@@ -685,8 +685,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
685 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & 685 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
686 CNIC_PAGE_MASK; 686 CNIC_PAGE_MASK;
687 687
688 tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 688 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
689 &tgt->cq_dma, GFP_KERNEL); 689 &tgt->cq_dma, GFP_KERNEL);
690 if (!tgt->cq) { 690 if (!tgt->cq) {
691 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", 691 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
692 tgt->cq_mem_size); 692 tgt->cq_mem_size);
@@ -698,8 +698,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
698 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & 698 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
699 CNIC_PAGE_MASK; 699 CNIC_PAGE_MASK;
700 700
701 tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 701 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
702 &tgt->rq_dma, GFP_KERNEL); 702 &tgt->rq_dma, GFP_KERNEL);
703 if (!tgt->rq) { 703 if (!tgt->rq) {
704 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", 704 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
705 tgt->rq_mem_size); 705 tgt->rq_mem_size);
@@ -710,8 +710,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
710 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & 710 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
711 CNIC_PAGE_MASK; 711 CNIC_PAGE_MASK;
712 712
713 tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 713 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
714 &tgt->rq_pbl_dma, GFP_KERNEL); 714 &tgt->rq_pbl_dma, GFP_KERNEL);
715 if (!tgt->rq_pbl) { 715 if (!tgt->rq_pbl) {
716 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", 716 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
717 tgt->rq_pbl_size); 717 tgt->rq_pbl_size);
@@ -735,9 +735,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
735 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & 735 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
736 CNIC_PAGE_MASK; 736 CNIC_PAGE_MASK;
737 737
738 tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev, 738 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
739 tgt->xferq_mem_size, &tgt->xferq_dma, 739 tgt->xferq_mem_size, &tgt->xferq_dma,
740 GFP_KERNEL); 740 GFP_KERNEL);
741 if (!tgt->xferq) { 741 if (!tgt->xferq) {
742 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", 742 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
743 tgt->xferq_mem_size); 743 tgt->xferq_mem_size);
@@ -749,9 +749,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
749 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & 749 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
750 CNIC_PAGE_MASK; 750 CNIC_PAGE_MASK;
751 751
752 tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev, 752 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
753 tgt->confq_mem_size, &tgt->confq_dma, 753 tgt->confq_mem_size, &tgt->confq_dma,
754 GFP_KERNEL); 754 GFP_KERNEL);
755 if (!tgt->confq) { 755 if (!tgt->confq) {
756 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", 756 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
757 tgt->confq_mem_size); 757 tgt->confq_mem_size);
@@ -763,9 +763,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
763 tgt->confq_pbl_size = 763 tgt->confq_pbl_size =
764 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; 764 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
765 765
766 tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, 766 tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
767 tgt->confq_pbl_size, 767 tgt->confq_pbl_size,
768 &tgt->confq_pbl_dma, GFP_KERNEL); 768 &tgt->confq_pbl_dma, GFP_KERNEL);
769 if (!tgt->confq_pbl) { 769 if (!tgt->confq_pbl) {
770 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", 770 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
771 tgt->confq_pbl_size); 771 tgt->confq_pbl_size);
@@ -787,9 +787,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
787 /* Allocate and map ConnDB */ 787 /* Allocate and map ConnDB */
788 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); 788 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
789 789
790 tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev, 790 tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
791 tgt->conn_db_mem_size, 791 tgt->conn_db_mem_size,
792 &tgt->conn_db_dma, GFP_KERNEL); 792 &tgt->conn_db_dma, GFP_KERNEL);
793 if (!tgt->conn_db) { 793 if (!tgt->conn_db) {
794 printk(KERN_ERR PFX "unable to allocate conn_db %d\n", 794 printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
795 tgt->conn_db_mem_size); 795 tgt->conn_db_mem_size);
@@ -802,8 +802,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
802 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & 802 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
803 CNIC_PAGE_MASK; 803 CNIC_PAGE_MASK;
804 804
805 tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 805 tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
806 &tgt->lcq_dma, GFP_KERNEL); 806 &tgt->lcq_dma, GFP_KERNEL);
807 807
808 if (!tgt->lcq) { 808 if (!tgt->lcq) {
809 printk(KERN_ERR PFX "unable to allocate lcq %d\n", 809 printk(KERN_ERR PFX "unable to allocate lcq %d\n",
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 91f5316aa3ab..fae6f71e677d 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1070,8 +1070,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1070 1070
1071 /* Allocate memory area for actual SQ element */ 1071 /* Allocate memory area for actual SQ element */
1072 ep->qp.sq_virt = 1072 ep->qp.sq_virt =
1073 dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, 1073 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1074 &ep->qp.sq_phys, GFP_KERNEL); 1074 &ep->qp.sq_phys, GFP_KERNEL);
1075 if (!ep->qp.sq_virt) { 1075 if (!ep->qp.sq_virt) {
1076 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", 1076 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
1077 ep->qp.sq_mem_size); 1077 ep->qp.sq_mem_size);
@@ -1106,8 +1106,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1106 1106
1107 /* Allocate memory area for actual CQ element */ 1107 /* Allocate memory area for actual CQ element */
1108 ep->qp.cq_virt = 1108 ep->qp.cq_virt =
1109 dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, 1109 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1110 &ep->qp.cq_phys, GFP_KERNEL); 1110 &ep->qp.cq_phys, GFP_KERNEL);
1111 if (!ep->qp.cq_virt) { 1111 if (!ep->qp.cq_virt) {
1112 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", 1112 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
1113 ep->qp.cq_mem_size); 1113 ep->qp.cq_mem_size);
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 8a004036e3d7..9bd2bd8dc2be 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable)
594 } 594 }
595 595
596 fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); 596 fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
597 ln->fc_vport = fc_vport;
597 598
598 if (csio_fcoe_alloc_vnp(hw, ln)) 599 if (csio_fcoe_alloc_vnp(hw, ln))
599 goto error; 600 goto error;
600 601
601 *(struct csio_lnode **)fc_vport->dd_data = ln; 602 *(struct csio_lnode **)fc_vport->dd_data = ln;
602 ln->fc_vport = fc_vport;
603 if (!fc_vport->node_name) 603 if (!fc_vport->node_name)
604 fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); 604 fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
605 if (!fc_vport->port_name) 605 if (!fc_vport->port_name)
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index dc12933533d5..66bbd21819ae 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -233,8 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
233 233
234 q = wrm->q_arr[free_idx]; 234 q = wrm->q_arr[free_idx];
235 235
236 q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart, 236 q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
237 GFP_KERNEL); 237 GFP_KERNEL);
238 if (!q->vstart) { 238 if (!q->vstart) {
239 csio_err(hw, 239 csio_err(hw,
240 "Failed to allocate DMA memory for " 240 "Failed to allocate DMA memory for "
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 8a20411699d9..75e1273a44b3 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
1144} 1144}
1145 1145
1146static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, 1146static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
1147 unsigned int tid, int pg_idx, bool reply) 1147 unsigned int tid, int pg_idx)
1148{ 1148{
1149 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, 1149 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
1150 GFP_KERNEL); 1150 GFP_KERNEL);
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
1160 req = (struct cpl_set_tcb_field *)skb->head; 1160 req = (struct cpl_set_tcb_field *)skb->head;
1161 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1161 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1162 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 1162 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1163 req->reply = V_NO_REPLY(reply ? 0 : 1); 1163 req->reply = V_NO_REPLY(1);
1164 req->cpu_idx = 0; 1164 req->cpu_idx = 0;
1165 req->word = htons(31); 1165 req->word = htons(31);
1166 req->mask = cpu_to_be64(0xF0000000); 1166 req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
1177 * @tid: connection id 1177 * @tid: connection id
1178 * @hcrc: header digest enabled 1178 * @hcrc: header digest enabled
1179 * @dcrc: data digest enabled 1179 * @dcrc: data digest enabled
1180 * @reply: request reply from h/w
1181 * set up the iscsi digest settings for a connection identified by tid 1180 * set up the iscsi digest settings for a connection identified by tid
1182 */ 1181 */
1183static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, 1182static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1184 int hcrc, int dcrc, int reply) 1183 int hcrc, int dcrc)
1185{ 1184{
1186 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, 1185 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
1187 GFP_KERNEL); 1186 GFP_KERNEL);
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1197 req = (struct cpl_set_tcb_field *)skb->head; 1196 req = (struct cpl_set_tcb_field *)skb->head;
1198 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1197 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1199 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 1198 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1200 req->reply = V_NO_REPLY(reply ? 0 : 1); 1199 req->reply = V_NO_REPLY(1);
1201 req->cpu_idx = 0; 1200 req->cpu_idx = 0;
1202 req->word = htons(31); 1201 req->word = htons(31);
1203 req->mask = cpu_to_be64(0x0F000000); 1202 req->mask = cpu_to_be64(0x0F000000);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 49f8028ac524..d26f50af00ea 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1548 struct cxgbi_sock *csk; 1548 struct cxgbi_sock *csk;
1549 1549
1550 csk = lookup_tid(t, tid); 1550 csk = lookup_tid(t, tid);
1551 if (!csk) 1551 if (!csk) {
1552 pr_err("can't find conn. for tid %u.\n", tid); 1552 pr_err("can't find conn. for tid %u.\n", tid);
1553 return;
1554 }
1553 1555
1554 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1556 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1555 "csk 0x%p,%u,%lx,%u, status 0x%x.\n", 1557 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1556 csk, csk->state, csk->flags, csk->tid, rpl->status); 1558 csk, csk->state, csk->flags, csk->tid, rpl->status);
1557 1559
1558 if (rpl->status != CPL_ERR_NONE) 1560 if (rpl->status != CPL_ERR_NONE) {
1559 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", 1561 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1560 csk, tid, rpl->status); 1562 csk, tid, rpl->status);
1563 csk->err = -EINVAL;
1564 }
1565
1566 complete(&csk->cmpl);
1561 1567
1562 __kfree_skb(skb); 1568 __kfree_skb(skb);
1563} 1569}
@@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1983} 1989}
1984 1990
1985static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, 1991static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1986 int pg_idx, bool reply) 1992 int pg_idx)
1987{ 1993{
1988 struct sk_buff *skb; 1994 struct sk_buff *skb;
1989 struct cpl_set_tcb_field *req; 1995 struct cpl_set_tcb_field *req;
@@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1999 req = (struct cpl_set_tcb_field *)skb->head; 2005 req = (struct cpl_set_tcb_field *)skb->head;
2000 INIT_TP_WR(req, csk->tid); 2006 INIT_TP_WR(req, csk->tid);
2001 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); 2007 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2002 req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); 2008 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2003 req->word_cookie = htons(0); 2009 req->word_cookie = htons(0);
2004 req->mask = cpu_to_be64(0x3 << 8); 2010 req->mask = cpu_to_be64(0x3 << 8);
2005 req->val = cpu_to_be64(pg_idx << 8); 2011 req->val = cpu_to_be64(pg_idx << 8);
@@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2008 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 2014 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2009 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); 2015 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2010 2016
2017 reinit_completion(&csk->cmpl);
2011 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 2018 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2012 return 0; 2019 wait_for_completion(&csk->cmpl);
2020
2021 return csk->err;
2013} 2022}
2014 2023
2015static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, 2024static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2016 int hcrc, int dcrc, int reply) 2025 int hcrc, int dcrc)
2017{ 2026{
2018 struct sk_buff *skb; 2027 struct sk_buff *skb;
2019 struct cpl_set_tcb_field *req; 2028 struct cpl_set_tcb_field *req;
@@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2031 req = (struct cpl_set_tcb_field *)skb->head; 2040 req = (struct cpl_set_tcb_field *)skb->head;
2032 INIT_TP_WR(req, tid); 2041 INIT_TP_WR(req, tid);
2033 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 2042 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2034 req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); 2043 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2035 req->word_cookie = htons(0); 2044 req->word_cookie = htons(0);
2036 req->mask = cpu_to_be64(0x3 << 4); 2045 req->mask = cpu_to_be64(0x3 << 4);
2037 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | 2046 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2041 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 2050 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2042 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); 2051 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2043 2052
2053 reinit_completion(&csk->cmpl);
2044 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 2054 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2045 return 0; 2055 wait_for_completion(&csk->cmpl);
2056
2057 return csk->err;
2046} 2058}
2047 2059
2048static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) 2060static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 75f876409fb9..245742557c03 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
573 skb_queue_head_init(&csk->receive_queue); 573 skb_queue_head_init(&csk->receive_queue);
574 skb_queue_head_init(&csk->write_queue); 574 skb_queue_head_init(&csk->write_queue);
575 timer_setup(&csk->retry_timer, NULL, 0); 575 timer_setup(&csk->retry_timer, NULL, 0);
576 init_completion(&csk->cmpl);
576 rwlock_init(&csk->callback_lock); 577 rwlock_init(&csk->callback_lock);
577 csk->cdev = cdev; 578 csk->cdev = cdev;
578 csk->flags = 0; 579 csk->flags = 0;
@@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2251 if (!err && conn->hdrdgst_en) 2252 if (!err && conn->hdrdgst_en)
2252 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2253 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2253 conn->hdrdgst_en, 2254 conn->hdrdgst_en,
2254 conn->datadgst_en, 0); 2255 conn->datadgst_en);
2255 break; 2256 break;
2256 case ISCSI_PARAM_DATADGST_EN: 2257 case ISCSI_PARAM_DATADGST_EN:
2257 err = iscsi_set_param(cls_conn, param, buf, buflen); 2258 err = iscsi_set_param(cls_conn, param, buf, buflen);
2258 if (!err && conn->datadgst_en) 2259 if (!err && conn->datadgst_en)
2259 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2260 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2260 conn->hdrdgst_en, 2261 conn->hdrdgst_en,
2261 conn->datadgst_en, 0); 2262 conn->datadgst_en);
2262 break; 2263 break;
2263 case ISCSI_PARAM_MAX_R2T: 2264 case ISCSI_PARAM_MAX_R2T:
2264 return iscsi_tcp_set_max_r2t(conn, buf); 2265 return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
2384 2385
2385 ppm = csk->cdev->cdev2ppm(csk->cdev); 2386 ppm = csk->cdev->cdev2ppm(csk->cdev);
2386 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, 2387 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
2387 ppm->tformat.pgsz_idx_dflt, 0); 2388 ppm->tformat.pgsz_idx_dflt);
2388 if (err < 0) 2389 if (err < 0)
2389 return err; 2390 return err;
2390 2391
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 5d5d8b50d842..1917ff57651d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -149,6 +149,7 @@ struct cxgbi_sock {
149 struct sk_buff_head receive_queue; 149 struct sk_buff_head receive_queue;
150 struct sk_buff_head write_queue; 150 struct sk_buff_head write_queue;
151 struct timer_list retry_timer; 151 struct timer_list retry_timer;
152 struct completion cmpl;
152 int err; 153 int err;
153 rwlock_t callback_lock; 154 rwlock_t callback_lock;
154 void *user_data; 155 void *user_data;
@@ -490,9 +491,9 @@ struct cxgbi_device {
490 struct cxgbi_ppm *, 491 struct cxgbi_ppm *,
491 struct cxgbi_task_tag_info *); 492 struct cxgbi_task_tag_info *);
492 int (*csk_ddp_setup_digest)(struct cxgbi_sock *, 493 int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
493 unsigned int, int, int, int); 494 unsigned int, int, int);
494 int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, 495 int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
495 unsigned int, int, bool); 496 unsigned int, int);
496 497
497 void (*csk_release_offload_resources)(struct cxgbi_sock *); 498 void (*csk_release_offload_resources)(struct cxgbi_sock *);
498 int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); 499 int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index bfa13e3b191c..c8bad2c093b8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3687,6 +3687,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
3687 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; 3687 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3688 3688
3689 cfg = shost_priv(host); 3689 cfg = shost_priv(host);
3690 cfg->state = STATE_PROBING;
3690 cfg->host = host; 3691 cfg->host = host;
3691 rc = alloc_mem(cfg); 3692 rc = alloc_mem(cfg);
3692 if (rc) { 3693 if (rc) {
@@ -3775,6 +3776,7 @@ out:
3775 return rc; 3776 return rc;
3776 3777
3777out_remove: 3778out_remove:
3779 cfg->state = STATE_PROBED;
3778 cxlflash_remove(pdev); 3780 cxlflash_remove(pdev);
3779 goto out; 3781 goto out;
3780} 3782}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index e2420a810e99..c92b3822c408 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2507,6 +2507,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2507 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2507 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2508 } 2508 }
2509 2509
2510 if (hisi_hba->prot_mask) {
2511 dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
2512 prot_mask);
2513 scsi_host_set_prot(hisi_hba->shost, prot_mask);
2514 }
2515
2510 rc = scsi_add_host(shost, dev); 2516 rc = scsi_add_host(shost, dev);
2511 if (rc) 2517 if (rc)
2512 goto err_out_ha; 2518 goto err_out_ha;
@@ -2519,12 +2525,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2519 if (rc) 2525 if (rc)
2520 goto err_out_register_ha; 2526 goto err_out_register_ha;
2521 2527
2522 if (hisi_hba->prot_mask) {
2523 dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
2524 prot_mask);
2525 scsi_host_set_prot(hisi_hba->shost, prot_mask);
2526 }
2527
2528 scsi_scan_host(shost); 2528 scsi_scan_host(shost);
2529 2529
2530 return 0; 2530 return 0;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 68b90c4f79a3..1727d0c71b12 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
576 shost->max_lun = ~0; 576 shost->max_lun = ~0;
577 shost->max_cmd_len = MAX_COMMAND_SIZE; 577 shost->max_cmd_len = MAX_COMMAND_SIZE;
578 578
579 /* turn on DIF support */
580 scsi_host_set_prot(shost,
581 SHOST_DIF_TYPE1_PROTECTION |
582 SHOST_DIF_TYPE2_PROTECTION |
583 SHOST_DIF_TYPE3_PROTECTION);
584 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
585
579 err = scsi_add_host(shost, &pdev->dev); 586 err = scsi_add_host(shost, &pdev->dev);
580 if (err) 587 if (err)
581 goto err_shost; 588 goto err_shost;
@@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
663 goto err_host_alloc; 670 goto err_host_alloc;
664 } 671 }
665 pci_info->hosts[i] = h; 672 pci_info->hosts[i] = h;
666
667 /* turn on DIF support */
668 scsi_host_set_prot(to_shost(h),
669 SHOST_DIF_TYPE1_PROTECTION |
670 SHOST_DIF_TYPE2_PROTECTION |
671 SHOST_DIF_TYPE3_PROTECTION);
672 scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
673 } 673 }
674 674
675 err = isci_setup_interrupts(pdev); 675 err = isci_setup_interrupts(pdev);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index be83590ed955..ff943f477d6f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1726 fc_frame_payload_op(fp) != ELS_LS_ACC) { 1726 fc_frame_payload_op(fp) != ELS_LS_ACC) {
1727 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); 1727 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
1728 fc_lport_error(lport, fp); 1728 fc_lport_error(lport, fp);
1729 goto err; 1729 goto out;
1730 } 1730 }
1731 1731
1732 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1732 flp = fc_frame_payload_get(fp, sizeof(*flp));
1733 if (!flp) { 1733 if (!flp) {
1734 FC_LPORT_DBG(lport, "FLOGI bad response\n"); 1734 FC_LPORT_DBG(lport, "FLOGI bad response\n");
1735 fc_lport_error(lport, fp); 1735 fc_lport_error(lport, fp);
1736 goto err; 1736 goto out;
1737 } 1737 }
1738 1738
1739 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1739 mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1743 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " 1743 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
1744 "lport->mfs:%hu\n", mfs, lport->mfs); 1744 "lport->mfs:%hu\n", mfs, lport->mfs);
1745 fc_lport_error(lport, fp); 1745 fc_lport_error(lport, fp);
1746 goto err; 1746 goto out;
1747 } 1747 }
1748 1748
1749 if (mfs <= lport->mfs) { 1749 if (mfs <= lport->mfs) {
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 9192a1d9dec6..dfba4921b265 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref)
184 struct fc_rport_priv *rdata; 184 struct fc_rport_priv *rdata;
185 185
186 rdata = container_of(kref, struct fc_rport_priv, kref); 186 rdata = container_of(kref, struct fc_rport_priv, kref);
187 WARN_ON(!list_empty(&rdata->peers));
188 kfree_rcu(rdata, rcu); 187 kfree_rcu(rdata, rcu);
189} 188}
190EXPORT_SYMBOL(fc_rport_destroy); 189EXPORT_SYMBOL(fc_rport_destroy);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b8d325ce8754..120fc520f27a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1459,7 +1459,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
1459 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) 1459 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
1460 return -ENODATA; 1460 return -ENODATA;
1461 1461
1462 spin_lock_bh(&conn->session->back_lock);
1463 if (conn->task == NULL) {
1464 spin_unlock_bh(&conn->session->back_lock);
1465 return -ENODATA;
1466 }
1462 __iscsi_get_task(task); 1467 __iscsi_get_task(task);
1468 spin_unlock_bh(&conn->session->back_lock);
1463 spin_unlock_bh(&conn->session->frwd_lock); 1469 spin_unlock_bh(&conn->session->frwd_lock);
1464 rc = conn->session->tt->xmit_task(task); 1470 rc = conn->session->tt->xmit_task(task);
1465 spin_lock_bh(&conn->session->frwd_lock); 1471 spin_lock_bh(&conn->session->frwd_lock);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 17eb4185f29d..f21c93bbb35c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -828,6 +828,7 @@ static struct domain_device *sas_ex_discover_end_dev(
828 rphy = sas_end_device_alloc(phy->port); 828 rphy = sas_end_device_alloc(phy->port);
829 if (!rphy) 829 if (!rphy)
830 goto out_free; 830 goto out_free;
831 rphy->identify.phy_identifier = phy_id;
831 832
832 child->rphy = rphy; 833 child->rphy = rphy;
833 get_device(&rphy->dev); 834 get_device(&rphy->dev);
@@ -854,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev(
854 855
855 child->rphy = rphy; 856 child->rphy = rphy;
856 get_device(&rphy->dev); 857 get_device(&rphy->dev);
858 rphy->identify.phy_identifier = phy_id;
857 sas_fill_in_rphy(child, rphy); 859 sas_fill_in_rphy(child, rphy);
858 860
859 list_add_tail(&child->disco_list_node, &parent->port->disco_list); 861 list_add_tail(&child->disco_list_node, &parent->port->disco_list);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 8698af86485d..2dc564e59430 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2730,8 +2730,8 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2730 INIT_LIST_HEAD(&dmabuf->list); 2730 INIT_LIST_HEAD(&dmabuf->list);
2731 2731
2732 /* now, allocate dma buffer */ 2732 /* now, allocate dma buffer */
2733 dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2733 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2734 &(dmabuf->phys), GFP_KERNEL); 2734 &(dmabuf->phys), GFP_KERNEL);
2735 2735
2736 if (!dmabuf->virt) { 2736 if (!dmabuf->virt) {
2737 kfree(dmabuf); 2737 kfree(dmabuf);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c1c36812c3d2..bede11e16349 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6973,9 +6973,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6973 if (!dmabuf) 6973 if (!dmabuf)
6974 return NULL; 6974 return NULL;
6975 6975
6976 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6976 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6977 LPFC_HDR_TEMPLATE_SIZE, 6977 LPFC_HDR_TEMPLATE_SIZE,
6978 &dmabuf->phys, GFP_KERNEL); 6978 &dmabuf->phys, GFP_KERNEL);
6979 if (!dmabuf->virt) { 6979 if (!dmabuf->virt) {
6980 rpi_hdr = NULL; 6980 rpi_hdr = NULL;
6981 goto err_free_dmabuf; 6981 goto err_free_dmabuf;
@@ -7397,8 +7397,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7397 } 7397 }
7398 7398
7399 /* Allocate memory for SLI-2 structures */ 7399 /* Allocate memory for SLI-2 structures */
7400 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7400 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7401 &phba->slim2p.phys, GFP_KERNEL); 7401 &phba->slim2p.phys, GFP_KERNEL);
7402 if (!phba->slim2p.virt) 7402 if (!phba->slim2p.virt)
7403 goto out_iounmap; 7403 goto out_iounmap;
7404 7404
@@ -7816,8 +7816,8 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
7816 * plus an alignment restriction of 16 bytes. 7816 * plus an alignment restriction of 16 bytes.
7817 */ 7817 */
7818 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 7818 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
7819 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, 7819 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
7820 &dmabuf->phys, GFP_KERNEL); 7820 &dmabuf->phys, GFP_KERNEL);
7821 if (!dmabuf->virt) { 7821 if (!dmabuf->virt) {
7822 kfree(dmabuf); 7822 kfree(dmabuf);
7823 return -ENOMEM; 7823 return -ENOMEM;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f6a5083a621e..4d3b94317515 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1827,9 +1827,9 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1827 * page, this is used as a priori size of SLI4_PAGE_SIZE for 1827 * page, this is used as a priori size of SLI4_PAGE_SIZE for
1828 * the later DMA memory free. 1828 * the later DMA memory free.
1829 */ 1829 */
1830 viraddr = dma_zalloc_coherent(&phba->pcidev->dev, 1830 viraddr = dma_alloc_coherent(&phba->pcidev->dev,
1831 SLI4_PAGE_SIZE, &phyaddr, 1831 SLI4_PAGE_SIZE, &phyaddr,
1832 GFP_KERNEL); 1832 GFP_KERNEL);
1833 /* In case of malloc fails, proceed with whatever we have */ 1833 /* In case of malloc fails, proceed with whatever we have */
1834 if (!viraddr) 1834 if (!viraddr)
1835 break; 1835 break;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 4c66b19e6199..8c9f79042228 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
297 lport); 297 lport);
298 298
299 /* release any threads waiting for the unreg to complete */ 299 /* release any threads waiting for the unreg to complete */
300 complete(&lport->lport_unreg_done); 300 if (lport->vport->localport)
301 complete(lport->lport_unreg_cmp);
301} 302}
302 303
303/* lpfc_nvme_remoteport_delete 304/* lpfc_nvme_remoteport_delete
@@ -2545,7 +2546,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2545 */ 2546 */
2546void 2547void
2547lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, 2548lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2548 struct lpfc_nvme_lport *lport) 2549 struct lpfc_nvme_lport *lport,
2550 struct completion *lport_unreg_cmp)
2549{ 2551{
2550#if (IS_ENABLED(CONFIG_NVME_FC)) 2552#if (IS_ENABLED(CONFIG_NVME_FC))
2551 u32 wait_tmo; 2553 u32 wait_tmo;
@@ -2557,8 +2559,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2557 */ 2559 */
2558 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); 2560 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2559 while (true) { 2561 while (true) {
2560 ret = wait_for_completion_timeout(&lport->lport_unreg_done, 2562 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2561 wait_tmo);
2562 if (unlikely(!ret)) { 2563 if (unlikely(!ret)) {
2563 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 2564 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
2564 "6176 Lport %p Localport %p wait " 2565 "6176 Lport %p Localport %p wait "
@@ -2592,12 +2593,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2592 struct lpfc_nvme_lport *lport; 2593 struct lpfc_nvme_lport *lport;
2593 struct lpfc_nvme_ctrl_stat *cstat; 2594 struct lpfc_nvme_ctrl_stat *cstat;
2594 int ret; 2595 int ret;
2596 DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2595 2597
2596 if (vport->nvmei_support == 0) 2598 if (vport->nvmei_support == 0)
2597 return; 2599 return;
2598 2600
2599 localport = vport->localport; 2601 localport = vport->localport;
2600 vport->localport = NULL;
2601 lport = (struct lpfc_nvme_lport *)localport->private; 2602 lport = (struct lpfc_nvme_lport *)localport->private;
2602 cstat = lport->cstat; 2603 cstat = lport->cstat;
2603 2604
@@ -2608,13 +2609,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2608 /* lport's rport list is clear. Unregister 2609 /* lport's rport list is clear. Unregister
2609 * lport and release resources. 2610 * lport and release resources.
2610 */ 2611 */
2611 init_completion(&lport->lport_unreg_done); 2612 lport->lport_unreg_cmp = &lport_unreg_cmp;
2612 ret = nvme_fc_unregister_localport(localport); 2613 ret = nvme_fc_unregister_localport(localport);
2613 2614
2614 /* Wait for completion. This either blocks 2615 /* Wait for completion. This either blocks
2615 * indefinitely or succeeds 2616 * indefinitely or succeeds
2616 */ 2617 */
2617 lpfc_nvme_lport_unreg_wait(vport, lport); 2618 lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2619 vport->localport = NULL;
2618 kfree(cstat); 2620 kfree(cstat);
2619 2621
2620 /* Regardless of the unregister upcall response, clear 2622 /* Regardless of the unregister upcall response, clear
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cfd4719be25c..b234d0298994 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat {
50/* Declare nvme-based local and remote port definitions. */ 50/* Declare nvme-based local and remote port definitions. */
51struct lpfc_nvme_lport { 51struct lpfc_nvme_lport {
52 struct lpfc_vport *vport; 52 struct lpfc_vport *vport;
53 struct completion lport_unreg_done; 53 struct completion *lport_unreg_cmp;
54 /* Add stats counters here */ 54 /* Add stats counters here */
55 struct lpfc_nvme_ctrl_stat *cstat; 55 struct lpfc_nvme_ctrl_stat *cstat;
56 atomic_t fc4NvmeLsRequests; 56 atomic_t fc4NvmeLsRequests;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 6245f442d784..95fee83090eb 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1003 struct lpfc_nvmet_tgtport *tport = targetport->private; 1003 struct lpfc_nvmet_tgtport *tport = targetport->private;
1004 1004
1005 /* release any threads waiting for the unreg to complete */ 1005 /* release any threads waiting for the unreg to complete */
1006 complete(&tport->tport_unreg_done); 1006 if (tport->phba->targetport)
1007 complete(tport->tport_unreg_cmp);
1007} 1008}
1008 1009
1009static void 1010static void
@@ -1692,6 +1693,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1692 struct lpfc_nvmet_tgtport *tgtp; 1693 struct lpfc_nvmet_tgtport *tgtp;
1693 struct lpfc_queue *wq; 1694 struct lpfc_queue *wq;
1694 uint32_t qidx; 1695 uint32_t qidx;
1696 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1695 1697
1696 if (phba->nvmet_support == 0) 1698 if (phba->nvmet_support == 0)
1697 return; 1699 return;
@@ -1701,9 +1703,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1701 wq = phba->sli4_hba.nvme_wq[qidx]; 1703 wq = phba->sli4_hba.nvme_wq[qidx];
1702 lpfc_nvmet_wqfull_flush(phba, wq, NULL); 1704 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1703 } 1705 }
1704 init_completion(&tgtp->tport_unreg_done); 1706 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1705 nvmet_fc_unregister_targetport(phba->targetport); 1707 nvmet_fc_unregister_targetport(phba->targetport);
1706 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); 1708 wait_for_completion_timeout(&tport_unreg_cmp, 5);
1707 lpfc_nvmet_cleanup_io_context(phba); 1709 lpfc_nvmet_cleanup_io_context(phba);
1708 } 1710 }
1709 phba->targetport = NULL; 1711 phba->targetport = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 1aaff63f1f41..0ec1082ce7ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -34,7 +34,7 @@
34/* Used for NVME Target */ 34/* Used for NVME Target */
35struct lpfc_nvmet_tgtport { 35struct lpfc_nvmet_tgtport {
36 struct lpfc_hba *phba; 36 struct lpfc_hba *phba;
37 struct completion tport_unreg_done; 37 struct completion *tport_unreg_cmp;
38 38
39 /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ 39 /* Stats counters - lpfc_nvmet_unsol_ls_buffer */
40 atomic_t rcv_ls_req_in; 40 atomic_t rcv_ls_req_in;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 30734caf77e1..2242e9b3ca12 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5362,8 +5362,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5362 * mailbox command. 5362 * mailbox command.
5363 */ 5363 */
5364 dma_size = *vpd_size; 5364 dma_size = *vpd_size;
5365 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, 5365 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5366 &dmabuf->phys, GFP_KERNEL); 5366 &dmabuf->phys, GFP_KERNEL);
5367 if (!dmabuf->virt) { 5367 if (!dmabuf->virt) {
5368 kfree(dmabuf); 5368 kfree(dmabuf);
5369 return -ENOMEM; 5369 return -ENOMEM;
@@ -6300,10 +6300,9 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6300 goto free_mem; 6300 goto free_mem;
6301 } 6301 }
6302 6302
6303 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6303 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6304 LPFC_RAS_MAX_ENTRY_SIZE, 6304 LPFC_RAS_MAX_ENTRY_SIZE,
6305 &dmabuf->phys, 6305 &dmabuf->phys, GFP_KERNEL);
6306 GFP_KERNEL);
6307 if (!dmabuf->virt) { 6306 if (!dmabuf->virt) {
6308 kfree(dmabuf); 6307 kfree(dmabuf);
6309 rc = -ENOMEM; 6308 rc = -ENOMEM;
@@ -9408,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9408 cmnd = CMD_XMIT_SEQUENCE64_CR; 9407 cmnd = CMD_XMIT_SEQUENCE64_CR;
9409 if (phba->link_flag & LS_LOOPBACK_MODE) 9408 if (phba->link_flag & LS_LOOPBACK_MODE)
9410 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9409 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9410 /* fall through */
9411 case CMD_XMIT_SEQUENCE64_CR: 9411 case CMD_XMIT_SEQUENCE64_CR:
9412 /* word3 iocb=io_tag32 wqe=reserved */ 9412 /* word3 iocb=io_tag32 wqe=reserved */
9413 wqe->xmit_sequence.rsvd3 = 0; 9413 wqe->xmit_sequence.rsvd3 = 0;
@@ -13529,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13529 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13529 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13530 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13530 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13531 "2537 Receive Frame Truncated!!\n"); 13531 "2537 Receive Frame Truncated!!\n");
13532 /* fall through */
13532 case FC_STATUS_RQ_SUCCESS: 13533 case FC_STATUS_RQ_SUCCESS:
13533 spin_lock_irqsave(&phba->hbalock, iflags); 13534 spin_lock_irqsave(&phba->hbalock, iflags);
13534 lpfc_sli4_rq_release(hrq, drq); 13535 lpfc_sli4_rq_release(hrq, drq);
@@ -13938,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13938 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13939 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13940 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13940 "6126 Receive Frame Truncated!!\n"); 13941 "6126 Receive Frame Truncated!!\n");
13941 /* Drop thru */ 13942 /* fall through */
13942 case FC_STATUS_RQ_SUCCESS: 13943 case FC_STATUS_RQ_SUCCESS:
13943 spin_lock_irqsave(&phba->hbalock, iflags); 13944 spin_lock_irqsave(&phba->hbalock, iflags);
13944 lpfc_sli4_rq_release(hrq, drq); 13945 lpfc_sli4_rq_release(hrq, drq);
@@ -14613,9 +14614,9 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14613 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 14614 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
14614 if (!dmabuf) 14615 if (!dmabuf)
14615 goto out_fail; 14616 goto out_fail;
14616 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 14617 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14617 hw_page_size, &dmabuf->phys, 14618 hw_page_size, &dmabuf->phys,
14618 GFP_KERNEL); 14619 GFP_KERNEL);
14619 if (!dmabuf->virt) { 14620 if (!dmabuf->virt) {
14620 kfree(dmabuf); 14621 kfree(dmabuf);
14621 goto out_fail; 14622 goto out_fail;
@@ -14850,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14850 eq->entry_count); 14851 eq->entry_count);
14851 if (eq->entry_count < 256) 14852 if (eq->entry_count < 256)
14852 return -EINVAL; 14853 return -EINVAL;
14853 /* otherwise default to smallest count (drop through) */ 14854 /* fall through - otherwise default to smallest count */
14854 case 256: 14855 case 256:
14855 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14856 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14856 LPFC_EQ_CNT_256); 14857 LPFC_EQ_CNT_256);
@@ -14981,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14981 LPFC_CQ_CNT_WORD7); 14982 LPFC_CQ_CNT_WORD7);
14982 break; 14983 break;
14983 } 14984 }
14984 /* Fall Thru */ 14985 /* fall through */
14985 default: 14986 default:
14986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14987 "0361 Unsupported CQ count: " 14988 "0361 Unsupported CQ count: "
@@ -14992,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14992 status = -EINVAL; 14993 status = -EINVAL;
14993 goto out; 14994 goto out;
14994 } 14995 }
14995 /* otherwise default to smallest count (drop through) */ 14996 /* fall through - otherwise default to smallest count */
14996 case 256: 14997 case 256:
14997 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14998 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14998 LPFC_CQ_CNT_256); 14999 LPFC_CQ_CNT_256);
@@ -15152,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15152 LPFC_CQ_CNT_WORD7); 15153 LPFC_CQ_CNT_WORD7);
15153 break; 15154 break;
15154 } 15155 }
15155 /* Fall Thru */ 15156 /* fall through */
15156 default: 15157 default:
15157 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15158 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15158 "3118 Bad CQ count. (%d)\n", 15159 "3118 Bad CQ count. (%d)\n",
@@ -15161,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15161 status = -EINVAL; 15162 status = -EINVAL;
15162 goto out; 15163 goto out;
15163 } 15164 }
15164 /* otherwise default to smallest (drop thru) */ 15165 /* fall through - otherwise default to smallest */
15165 case 256: 15166 case 256:
15166 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15167 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15167 &cq_set->u.request, LPFC_CQ_CNT_256); 15168 &cq_set->u.request, LPFC_CQ_CNT_256);
@@ -15433,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15433 status = -EINVAL; 15434 status = -EINVAL;
15434 goto out; 15435 goto out;
15435 } 15436 }
15436 /* otherwise default to smallest count (drop through) */ 15437 /* fall through - otherwise default to smallest count */
15437 case 16: 15438 case 16:
15438 bf_set(lpfc_mq_context_ring_size, 15439 bf_set(lpfc_mq_context_ring_size,
15439 &mq_create_ext->u.request.context, 15440 &mq_create_ext->u.request.context,
@@ -15852,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15852 status = -EINVAL; 15853 status = -EINVAL;
15853 goto out; 15854 goto out;
15854 } 15855 }
15855 /* otherwise default to smallest count (drop through) */ 15856 /* fall through - otherwise default to smallest count */
15856 case 512: 15857 case 512:
15857 bf_set(lpfc_rq_context_rqe_count, 15858 bf_set(lpfc_rq_context_rqe_count,
15858 &rq_create->u.request.context, 15859 &rq_create->u.request.context,
@@ -15989,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15989 status = -EINVAL; 15990 status = -EINVAL;
15990 goto out; 15991 goto out;
15991 } 15992 }
15992 /* otherwise default to smallest count (drop through) */ 15993 /* fall through - otherwise default to smallest count */
15993 case 512: 15994 case 512:
15994 bf_set(lpfc_rq_context_rqe_count, 15995 bf_set(lpfc_rq_context_rqe_count,
15995 &rq_create->u.request.context, 15996 &rq_create->u.request.context,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index e836392b75e8..f112458023ff 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -967,9 +967,10 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
967 * Allocate the common 16-byte aligned memory for the handshake 967 * Allocate the common 16-byte aligned memory for the handshake
968 * mailbox. 968 * mailbox.
969 */ 969 */
970 raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev, 970 raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev,
971 sizeof(mbox64_t), &raid_dev->una_mbox64_dma, 971 sizeof(mbox64_t),
972 GFP_KERNEL); 972 &raid_dev->una_mbox64_dma,
973 GFP_KERNEL);
973 974
974 if (!raid_dev->una_mbox64) { 975 if (!raid_dev->una_mbox64) {
975 con_log(CL_ANN, (KERN_WARNING 976 con_log(CL_ANN, (KERN_WARNING
@@ -995,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
995 align; 996 align;
996 997
997 // Allocate memory for commands issued internally 998 // Allocate memory for commands issued internally
998 adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, 999 adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
999 &adapter->ibuf_dma_h, GFP_KERNEL); 1000 &adapter->ibuf_dma_h, GFP_KERNEL);
1000 if (!adapter->ibuf) { 1001 if (!adapter->ibuf) {
1001 1002
1002 con_log(CL_ANN, (KERN_WARNING 1003 con_log(CL_ANN, (KERN_WARNING
@@ -2897,8 +2898,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
2897 * Issue an ENQUIRY3 command to find out certain adapter parameters, 2898 * Issue an ENQUIRY3 command to find out certain adapter parameters,
2898 * e.g., max channels, max commands etc. 2899 * e.g., max channels, max commands etc.
2899 */ 2900 */
2900 pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), 2901 pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
2901 &pinfo_dma_h, GFP_KERNEL); 2902 &pinfo_dma_h, GFP_KERNEL);
2902 if (pinfo == NULL) { 2903 if (pinfo == NULL) {
2903 con_log(CL_ANN, (KERN_WARNING 2904 con_log(CL_ANN, (KERN_WARNING
2904 "megaraid: out of memory, %s %d\n", __func__, 2905 "megaraid: out of memory, %s %d\n", __func__,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f7bdd783360a..fcbff83c0097 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2273,9 +2273,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2273 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2273 sizeof(struct MR_LD_VF_AFFILIATION_111));
2274 else { 2274 else {
2275 new_affiliation_111 = 2275 new_affiliation_111 =
2276 dma_zalloc_coherent(&instance->pdev->dev, 2276 dma_alloc_coherent(&instance->pdev->dev,
2277 sizeof(struct MR_LD_VF_AFFILIATION_111), 2277 sizeof(struct MR_LD_VF_AFFILIATION_111),
2278 &new_affiliation_111_h, GFP_KERNEL); 2278 &new_affiliation_111_h, GFP_KERNEL);
2279 if (!new_affiliation_111) { 2279 if (!new_affiliation_111) {
2280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2281 "memory for new affiliation for scsi%d\n", 2281 "memory for new affiliation for scsi%d\n",
@@ -2380,10 +2380,9 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2380 sizeof(struct MR_LD_VF_AFFILIATION)); 2380 sizeof(struct MR_LD_VF_AFFILIATION));
2381 else { 2381 else {
2382 new_affiliation = 2382 new_affiliation =
2383 dma_zalloc_coherent(&instance->pdev->dev, 2383 dma_alloc_coherent(&instance->pdev->dev,
2384 (MAX_LOGICAL_DRIVES + 1) * 2384 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2385 sizeof(struct MR_LD_VF_AFFILIATION), 2385 &new_affiliation_h, GFP_KERNEL);
2386 &new_affiliation_h, GFP_KERNEL);
2387 if (!new_affiliation) { 2386 if (!new_affiliation) {
2388 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2387 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2389 "memory for new affiliation for scsi%d\n", 2388 "memory for new affiliation for scsi%d\n",
@@ -2546,9 +2545,10 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2546 2545
2547 if (initial) { 2546 if (initial) {
2548 instance->hb_host_mem = 2547 instance->hb_host_mem =
2549 dma_zalloc_coherent(&instance->pdev->dev, 2548 dma_alloc_coherent(&instance->pdev->dev,
2550 sizeof(struct MR_CTRL_HB_HOST_MEM), 2549 sizeof(struct MR_CTRL_HB_HOST_MEM),
2551 &instance->hb_host_mem_h, GFP_KERNEL); 2550 &instance->hb_host_mem_h,
2551 GFP_KERNEL);
2552 if (!instance->hb_host_mem) { 2552 if (!instance->hb_host_mem) {
2553 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2553 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2554 " memory for heartbeat host memory for scsi%d\n", 2554 " memory for heartbeat host memory for scsi%d\n",
@@ -5816,9 +5816,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
5816 } 5816 }
5817 5817
5818 dcmd = &cmd->frame->dcmd; 5818 dcmd = &cmd->frame->dcmd;
5819 el_info = dma_zalloc_coherent(&instance->pdev->dev, 5819 el_info = dma_alloc_coherent(&instance->pdev->dev,
5820 sizeof(struct megasas_evt_log_info), &el_info_h, 5820 sizeof(struct megasas_evt_log_info),
5821 GFP_KERNEL); 5821 &el_info_h, GFP_KERNEL);
5822 if (!el_info) { 5822 if (!el_info) {
5823 megasas_return_cmd(instance, cmd); 5823 megasas_return_cmd(instance, cmd);
5824 return -ENOMEM; 5824 return -ENOMEM;
@@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance)
6236 instance->consistent_mask_64bit = true; 6236 instance->consistent_mask_64bit = true;
6237 6237
6238 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6238 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6239 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"), 6239 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6240 (instance->consistent_mask_64bit ? "63" : "32")); 6240 (instance->consistent_mask_64bit ? "63" : "32"));
6241 6241
6242 return 0; 6242 return 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 211c17c33aa0..647f48a28f85 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance)
175 /* 175 /*
176 * Check if it is our interrupt 176 * Check if it is our interrupt
177 */ 177 */
178 status = readl(&regs->outbound_intr_status); 178 status = megasas_readl(instance,
179 &regs->outbound_intr_status);
179 180
180 if (status & 1) { 181 if (status & 1) {
181 writel(status, &regs->outbound_intr_status); 182 writel(status, &regs->outbound_intr_status);
@@ -689,8 +690,9 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
689 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * 690 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
690 MAX_MSIX_QUEUES_FUSION; 691 MAX_MSIX_QUEUES_FUSION;
691 692
692 fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev, 693 fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
693 array_size, &fusion->rdpq_phys, GFP_KERNEL); 694 array_size, &fusion->rdpq_phys,
695 GFP_KERNEL);
694 if (!fusion->rdpq_virt) { 696 if (!fusion->rdpq_virt) {
695 dev_err(&instance->pdev->dev, 697 dev_err(&instance->pdev->dev,
696 "Failed from %s %d\n", __func__, __LINE__); 698 "Failed from %s %d\n", __func__, __LINE__);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index f3e182eb0970..c9dc7740e9e7 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1915,8 +1915,9 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1915 /* We use the PCI APIs for now until the generic one gets fixed 1915 /* We use the PCI APIs for now until the generic one gets fixed
1916 * enough or until we get some macio-specific versions 1916 * enough or until we get some macio-specific versions
1917 */ 1917 */
1918 dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev, 1918 dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
1919 ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL); 1919 ms->dma_cmd_size, &dma_cmd_bus,
1920 GFP_KERNEL);
1920 if (dma_cmd_space == NULL) { 1921 if (dma_cmd_space == NULL) {
1921 printk(KERN_ERR "mesh: can't allocate DMA table\n"); 1922 printk(KERN_ERR "mesh: can't allocate DMA table\n");
1922 goto out_unmap; 1923 goto out_unmap;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index dbe753fba486..36f64205ecfa 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,9 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
143 143
144 case RESOURCE_UNCACHED_MEMORY: 144 case RESOURCE_UNCACHED_MEMORY:
145 size = round_up(size, 8); 145 size = round_up(size, 8);
146 res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, 146 res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
147 &res->bus_addr, GFP_KERNEL); 147 &res->bus_addr,
148 GFP_KERNEL);
148 if (!res->virt_addr) { 149 if (!res->virt_addr) {
149 dev_err(&mhba->pdev->dev, 150 dev_err(&mhba->pdev->dev,
150 "unable to allocate consistent mem," 151 "unable to allocate consistent mem,"
@@ -246,8 +247,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
246 if (size == 0) 247 if (size == 0)
247 return 0; 248 return 0;
248 249
249 virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr, 250 virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
250 GFP_KERNEL); 251 GFP_KERNEL);
251 if (!virt_addr) 252 if (!virt_addr)
252 return -1; 253 return -1;
253 254
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b3be49d41375..084f2fcced0a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
116 u64 align_offset = 0; 116 u64 align_offset = 0;
117 if (align) 117 if (align)
118 align_offset = (dma_addr_t)align - 1; 118 align_offset = (dma_addr_t)align - 1;
119 mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align, 119 mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
120 &mem_dma_handle, GFP_KERNEL); 120 &mem_dma_handle, GFP_KERNEL);
121 if (!mem_virt_alloc) { 121 if (!mem_virt_alloc) {
122 pm8001_printk("memory allocation error\n"); 122 pm8001_printk("memory allocation error\n");
123 return -1; 123 return -1;
@@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
657 if (dev->dev_type == SAS_SATA_DEV) { 657 if (dev->dev_type == SAS_SATA_DEV) {
658 pm8001_device->attached_phy = 658 pm8001_device->attached_phy =
659 dev->rphy->identify.phy_identifier; 659 dev->rphy->identify.phy_identifier;
660 flag = 1; /* directly sata*/ 660 flag = 1; /* directly sata */
661 } 661 }
662 } /*register this device to HBA*/ 662 } /*register this device to HBA*/
663 PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); 663 PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index edcaf4b0cb0b..9bbc19fc190b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1050,16 +1050,17 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1050 sizeof(void *); 1050 sizeof(void *);
1051 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; 1051 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
1052 1052
1053 fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, 1053 fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1054 fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); 1054 &fcport->sq_dma, GFP_KERNEL);
1055 if (!fcport->sq) { 1055 if (!fcport->sq) {
1056 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); 1056 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
1057 rval = 1; 1057 rval = 1;
1058 goto out; 1058 goto out;
1059 } 1059 }
1060 1060
1061 fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, 1061 fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
1062 fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); 1062 fcport->sq_pbl_size,
1063 &fcport->sq_pbl_dma, GFP_KERNEL);
1063 if (!fcport->sq_pbl) { 1064 if (!fcport->sq_pbl) {
1064 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); 1065 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
1065 rval = 1; 1066 rval = 1;
@@ -2680,8 +2681,10 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2680 } 2681 }
2681 2682
2682 /* Allocate list of PBL pages */ 2683 /* Allocate list of PBL pages */
2683 qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, 2684 qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
2684 QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); 2685 QEDF_PAGE_SIZE,
2686 &qedf->bdq_pbl_list_dma,
2687 GFP_KERNEL);
2685 if (!qedf->bdq_pbl_list) { 2688 if (!qedf->bdq_pbl_list) {
2686 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); 2689 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
2687 return -ENOMEM; 2690 return -ENOMEM;
@@ -2770,9 +2773,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
2770 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); 2773 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
2771 2774
2772 qedf->global_queues[i]->cq = 2775 qedf->global_queues[i]->cq =
2773 dma_zalloc_coherent(&qedf->pdev->dev, 2776 dma_alloc_coherent(&qedf->pdev->dev,
2774 qedf->global_queues[i]->cq_mem_size, 2777 qedf->global_queues[i]->cq_mem_size,
2775 &qedf->global_queues[i]->cq_dma, GFP_KERNEL); 2778 &qedf->global_queues[i]->cq_dma,
2779 GFP_KERNEL);
2776 2780
2777 if (!qedf->global_queues[i]->cq) { 2781 if (!qedf->global_queues[i]->cq) {
2778 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); 2782 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
@@ -2781,9 +2785,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
2781 } 2785 }
2782 2786
2783 qedf->global_queues[i]->cq_pbl = 2787 qedf->global_queues[i]->cq_pbl =
2784 dma_zalloc_coherent(&qedf->pdev->dev, 2788 dma_alloc_coherent(&qedf->pdev->dev,
2785 qedf->global_queues[i]->cq_pbl_size, 2789 qedf->global_queues[i]->cq_pbl_size,
2786 &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); 2790 &qedf->global_queues[i]->cq_pbl_dma,
2791 GFP_KERNEL);
2787 2792
2788 if (!qedf->global_queues[i]->cq_pbl) { 2793 if (!qedf->global_queues[i]->cq_pbl) {
2789 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); 2794 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 4da660c1c431..6d6d6013e35b 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
953 953
954 qedi_ep = ep->dd_data; 954 qedi_ep = ep->dd_data;
955 if (qedi_ep->state == EP_STATE_IDLE || 955 if (qedi_ep->state == EP_STATE_IDLE ||
956 qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
956 qedi_ep->state == EP_STATE_OFLDCONN_FAILED) 957 qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
957 return -1; 958 return -1;
958 959
@@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
1035 1036
1036 switch (qedi_ep->state) { 1037 switch (qedi_ep->state) {
1037 case EP_STATE_OFLDCONN_START: 1038 case EP_STATE_OFLDCONN_START:
1039 case EP_STATE_OFLDCONN_NONE:
1038 goto ep_release_conn; 1040 goto ep_release_conn;
1039 case EP_STATE_OFLDCONN_FAILED: 1041 case EP_STATE_OFLDCONN_FAILED:
1040 break; 1042 break;
@@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
1225 1227
1226 if (!is_valid_ether_addr(&path_data->mac_addr[0])) { 1228 if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
1227 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); 1229 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
1230 qedi_ep->state = EP_STATE_OFLDCONN_NONE;
1228 ret = -EIO; 1231 ret = -EIO;
1229 goto set_path_exit; 1232 goto set_path_exit;
1230 } 1233 }
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 11260776212f..892d70d54553 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -59,6 +59,7 @@ enum {
59 EP_STATE_OFLDCONN_FAILED = 0x2000, 59 EP_STATE_OFLDCONN_FAILED = 0x2000,
60 EP_STATE_CONNECT_FAILED = 0x4000, 60 EP_STATE_CONNECT_FAILED = 0x4000,
61 EP_STATE_DISCONN_TIMEDOUT = 0x8000, 61 EP_STATE_DISCONN_TIMEDOUT = 0x8000,
62 EP_STATE_OFLDCONN_NONE = 0x10000,
62}; 63};
63 64
64struct qedi_conn; 65struct qedi_conn;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5c53409a8cea..e74a62448ba4 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1394,10 +1394,9 @@ static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1394{ 1394{
1395 struct qedi_nvm_iscsi_image nvm_image; 1395 struct qedi_nvm_iscsi_image nvm_image;
1396 1396
1397 qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev, 1397 qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
1398 sizeof(nvm_image), 1398 sizeof(nvm_image),
1399 &qedi->nvm_buf_dma, 1399 &qedi->nvm_buf_dma, GFP_KERNEL);
1400 GFP_KERNEL);
1401 if (!qedi->iscsi_image) { 1400 if (!qedi->iscsi_image) {
1402 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); 1401 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
1403 return -ENOMEM; 1402 return -ENOMEM;
@@ -1510,10 +1509,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
1510 } 1509 }
1511 1510
1512 /* Allocate list of PBL pages */ 1511 /* Allocate list of PBL pages */
1513 qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, 1512 qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
1514 QEDI_PAGE_SIZE, 1513 QEDI_PAGE_SIZE,
1515 &qedi->bdq_pbl_list_dma, 1514 &qedi->bdq_pbl_list_dma,
1516 GFP_KERNEL); 1515 GFP_KERNEL);
1517 if (!qedi->bdq_pbl_list) { 1516 if (!qedi->bdq_pbl_list) {
1518 QEDI_ERR(&qedi->dbg_ctx, 1517 QEDI_ERR(&qedi->dbg_ctx,
1519 "Could not allocate list of PBL pages.\n"); 1518 "Could not allocate list of PBL pages.\n");
@@ -1609,10 +1608,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1609 (qedi->global_queues[i]->cq_pbl_size + 1608 (qedi->global_queues[i]->cq_pbl_size +
1610 (QEDI_PAGE_SIZE - 1)); 1609 (QEDI_PAGE_SIZE - 1));
1611 1610
1612 qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev, 1611 qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev,
1613 qedi->global_queues[i]->cq_mem_size, 1612 qedi->global_queues[i]->cq_mem_size,
1614 &qedi->global_queues[i]->cq_dma, 1613 &qedi->global_queues[i]->cq_dma,
1615 GFP_KERNEL); 1614 GFP_KERNEL);
1616 1615
1617 if (!qedi->global_queues[i]->cq) { 1616 if (!qedi->global_queues[i]->cq) {
1618 QEDI_WARN(&qedi->dbg_ctx, 1617 QEDI_WARN(&qedi->dbg_ctx,
@@ -1620,10 +1619,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1620 status = -ENOMEM; 1619 status = -ENOMEM;
1621 goto mem_alloc_failure; 1620 goto mem_alloc_failure;
1622 } 1621 }
1623 qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, 1622 qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
1624 qedi->global_queues[i]->cq_pbl_size, 1623 qedi->global_queues[i]->cq_pbl_size,
1625 &qedi->global_queues[i]->cq_pbl_dma, 1624 &qedi->global_queues[i]->cq_pbl_dma,
1626 GFP_KERNEL); 1625 GFP_KERNEL);
1627 1626
1628 if (!qedi->global_queues[i]->cq_pbl) { 1627 if (!qedi->global_queues[i]->cq_pbl) {
1629 QEDI_WARN(&qedi->dbg_ctx, 1628 QEDI_WARN(&qedi->dbg_ctx,
@@ -1691,16 +1690,16 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
1691 ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); 1690 ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
1692 ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; 1691 ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
1693 1692
1694 ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, 1693 ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
1695 &ep->sq_dma, GFP_KERNEL); 1694 &ep->sq_dma, GFP_KERNEL);
1696 if (!ep->sq) { 1695 if (!ep->sq) {
1697 QEDI_WARN(&qedi->dbg_ctx, 1696 QEDI_WARN(&qedi->dbg_ctx,
1698 "Could not allocate send queue.\n"); 1697 "Could not allocate send queue.\n");
1699 rval = -ENOMEM; 1698 rval = -ENOMEM;
1700 goto out; 1699 goto out;
1701 } 1700 }
1702 ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, 1701 ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
1703 &ep->sq_pbl_dma, GFP_KERNEL); 1702 &ep->sq_pbl_dma, GFP_KERNEL);
1704 if (!ep->sq_pbl) { 1703 if (!ep->sq_pbl) {
1705 QEDI_WARN(&qedi->dbg_ctx, 1704 QEDI_WARN(&qedi->dbg_ctx,
1706 "Could not allocate send queue PBL.\n"); 1705 "Could not allocate send queue PBL.\n");
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index a414f51302b7..6856dfdfa473 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4248 ha->devnum = devnum; /* specifies microcode load address */ 4248 ha->devnum = devnum; /* specifies microcode load address */
4249 4249
4250#ifdef QLA_64BIT_PTR 4250#ifdef QLA_64BIT_PTR
4251 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { 4251 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
4252 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { 4252 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4253 printk(KERN_WARNING "scsi(%li): Unable to set a " 4253 printk(KERN_WARNING "scsi(%li): Unable to set a "
4254 "suitable DMA mask - aborting\n", ha->host_no); 4254 "suitable DMA mask - aborting\n", ha->host_no);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 00444dc79756..ac504a1ff0ff 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2415,8 +2415,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2415 if (qla2x00_chip_is_down(vha)) 2415 if (qla2x00_chip_is_down(vha))
2416 goto done; 2416 goto done;
2417 2417
2418 stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), 2418 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2419 &stats_dma, GFP_KERNEL); 2419 GFP_KERNEL);
2420 if (!stats) { 2420 if (!stats) {
2421 ql_log(ql_log_warn, vha, 0x707d, 2421 ql_log(ql_log_warn, vha, 0x707d,
2422 "Failed to allocate memory for stats.\n"); 2422 "Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4a9fd8d944d6..17d42658ad9a 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2312,8 +2312,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2312 if (!IS_FWI2_CAPABLE(ha)) 2312 if (!IS_FWI2_CAPABLE(ha))
2313 return -EPERM; 2313 return -EPERM;
2314 2314
2315 stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), 2315 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2316 &stats_dma, GFP_KERNEL); 2316 GFP_KERNEL);
2317 if (!stats) { 2317 if (!stats) {
2318 ql_log(ql_log_warn, vha, 0x70e2, 2318 ql_log(ql_log_warn, vha, 0x70e2,
2319 "Failed to allocate memory for stats.\n"); 2319 "Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 26b93c563f92..d1fc4958222a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host {
4394 uint16_t n2n_id; 4394 uint16_t n2n_id;
4395 struct list_head gpnid_list; 4395 struct list_head gpnid_list;
4396 struct fab_scan scan; 4396 struct fab_scan scan;
4397
4398 unsigned int irq_offset;
4397} scsi_qla_host_t; 4399} scsi_qla_host_t;
4398 4400
4399struct qla27xx_image_status { 4401struct qla27xx_image_status {
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 90cfa394f942..cbc3bc49d4d1 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -4147,9 +4147,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4147 return rval; 4147 return rval;
4148 } 4148 }
4149 4149
4150 sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent( 4150 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
4151 &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), 4151 sizeof(struct ct_sns_pkt),
4152 &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); 4152 &sp->u.iocb_cmd.u.ctarg.req_dma,
4153 GFP_KERNEL);
4153 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 4154 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
4154 if (!sp->u.iocb_cmd.u.ctarg.req) { 4155 if (!sp->u.iocb_cmd.u.ctarg.req) {
4155 ql_log(ql_log_warn, vha, 0xffff, 4156 ql_log(ql_log_warn, vha, 0xffff,
@@ -4165,9 +4166,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4165 ((vha->hw->max_fibre_devices - 1) * 4166 ((vha->hw->max_fibre_devices - 1) *
4166 sizeof(struct ct_sns_gpn_ft_data)); 4167 sizeof(struct ct_sns_gpn_ft_data));
4167 4168
4168 sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent( 4169 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
4169 &vha->hw->pdev->dev, rspsz, 4170 rspsz,
4170 &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); 4171 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4172 GFP_KERNEL);
4171 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 4173 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
4172 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 4174 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4173 ql_log(ql_log_warn, vha, 0xffff, 4175 ql_log(ql_log_warn, vha, 0xffff,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 364bb52ed2a6..8d1acc802a67 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
1785 1785
1786 /* Issue Marker IOCB */ 1786 /* Issue Marker IOCB */
1787 qla2x00_marker(vha, vha->hw->req_q_map[0], 1787 qla2x00_marker(vha, vha->hw->req_q_map[0],
1788 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, 1788 vha->hw->rsp_q_map[0], fcport->loop_id, lun,
1789 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 1789 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
1790 } 1790 }
1791 1791
1792done_free_sp: 1792done_free_sp:
1793 sp->free(sp); 1793 sp->free(sp);
1794 sp->fcport->flags &= ~FCF_ASYNC_SENT; 1794 fcport->flags &= ~FCF_ASYNC_SENT;
1795done: 1795done:
1796 return rval; 1796 return rval;
1797} 1797}
@@ -3099,8 +3099,8 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
3099 FCE_SIZE, ha->fce, ha->fce_dma); 3099 FCE_SIZE, ha->fce, ha->fce_dma);
3100 3100
3101 /* Allocate memory for Fibre Channel Event Buffer. */ 3101 /* Allocate memory for Fibre Channel Event Buffer. */
3102 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3102 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3103 GFP_KERNEL); 3103 GFP_KERNEL);
3104 if (!tc) { 3104 if (!tc) {
3105 ql_log(ql_log_warn, vha, 0x00be, 3105 ql_log(ql_log_warn, vha, 0x00be,
3106 "Unable to allocate (%d KB) for FCE.\n", 3106 "Unable to allocate (%d KB) for FCE.\n",
@@ -3131,8 +3131,8 @@ try_eft:
3131 EFT_SIZE, ha->eft, ha->eft_dma); 3131 EFT_SIZE, ha->eft, ha->eft_dma);
3132 3132
3133 /* Allocate memory for Extended Trace Buffer. */ 3133 /* Allocate memory for Extended Trace Buffer. */
3134 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 3134 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3135 GFP_KERNEL); 3135 GFP_KERNEL);
3136 if (!tc) { 3136 if (!tc) {
3137 ql_log(ql_log_warn, vha, 0x00c1, 3137 ql_log(ql_log_warn, vha, 0x00c1,
3138 "Unable to allocate (%d KB) for EFT.\n", 3138 "Unable to allocate (%d KB) for EFT.\n",
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 30d3090842f8..8507c43b918c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3446 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 3446 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3447 } 3447 }
3448 } 3448 }
3449 vha->irq_offset = desc.pre_vectors;
3449 ha->msix_entries = kcalloc(ha->msix_count, 3450 ha->msix_entries = kcalloc(ha->msix_count,
3450 sizeof(struct qla_msix_entry), 3451 sizeof(struct qla_msix_entry),
3451 GFP_KERNEL); 3452 GFP_KERNEL);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ea69dafc9774..c6ef83d0d99b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
6939 if (USER_CTRL_IRQ(vha->hw)) 6939 if (USER_CTRL_IRQ(vha->hw))
6940 rc = blk_mq_map_queues(qmap); 6940 rc = blk_mq_map_queues(qmap);
6941 else 6941 else
6942 rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0); 6942 rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
6943 return rc; 6943 return rc;
6944} 6944}
6945 6945
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1ef74aa2d00a..2bf5e3e639e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,8 +153,8 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
153 dma_addr_t sys_info_dma; 153 dma_addr_t sys_info_dma;
154 int status = QLA_ERROR; 154 int status = QLA_ERROR;
155 155
156 sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 156 sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
157 &sys_info_dma, GFP_KERNEL); 157 &sys_info_dma, GFP_KERNEL);
158 if (sys_info == NULL) { 158 if (sys_info == NULL) {
159 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 159 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
160 ha->host_no, __func__)); 160 ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 5d56904687b9..dac9a7013208 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,9 +625,9 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
625 uint32_t mbox_sts[MBOX_REG_COUNT]; 625 uint32_t mbox_sts[MBOX_REG_COUNT];
626 int status = QLA_ERROR; 626 int status = QLA_ERROR;
627 627
628 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 628 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
629 sizeof(struct addr_ctrl_blk), 629 sizeof(struct addr_ctrl_blk),
630 &init_fw_cb_dma, GFP_KERNEL); 630 &init_fw_cb_dma, GFP_KERNEL);
631 if (init_fw_cb == NULL) { 631 if (init_fw_cb == NULL) {
632 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", 632 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
633 ha->host_no, __func__)); 633 ha->host_no, __func__));
@@ -709,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
709 uint32_t mbox_cmd[MBOX_REG_COUNT]; 709 uint32_t mbox_cmd[MBOX_REG_COUNT];
710 uint32_t mbox_sts[MBOX_REG_COUNT]; 710 uint32_t mbox_sts[MBOX_REG_COUNT];
711 711
712 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 712 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
713 sizeof(struct addr_ctrl_blk), 713 sizeof(struct addr_ctrl_blk),
714 &init_fw_cb_dma, GFP_KERNEL); 714 &init_fw_cb_dma, GFP_KERNEL);
715 if (init_fw_cb == NULL) { 715 if (init_fw_cb == NULL) {
716 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, 716 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
717 __func__); 717 __func__);
@@ -1340,9 +1340,9 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
1340 uint32_t mbox_sts[MBOX_REG_COUNT]; 1340 uint32_t mbox_sts[MBOX_REG_COUNT];
1341 int status = QLA_ERROR; 1341 int status = QLA_ERROR;
1342 1342
1343 about_fw = dma_zalloc_coherent(&ha->pdev->dev, 1343 about_fw = dma_alloc_coherent(&ha->pdev->dev,
1344 sizeof(struct about_fw_info), 1344 sizeof(struct about_fw_info),
1345 &about_fw_dma, GFP_KERNEL); 1345 &about_fw_dma, GFP_KERNEL);
1346 if (!about_fw) { 1346 if (!about_fw) {
1347 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " 1347 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
1348 "for about_fw\n", __func__)); 1348 "for about_fw\n", __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d2b333d629be..5a31877c9d04 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4052,8 +4052,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
4052 dma_addr_t sys_info_dma; 4052 dma_addr_t sys_info_dma;
4053 int status = QLA_ERROR; 4053 int status = QLA_ERROR;
4054 4054
4055 sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 4055 sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
4056 &sys_info_dma, GFP_KERNEL); 4056 &sys_info_dma, GFP_KERNEL);
4057 if (sys_info == NULL) { 4057 if (sys_info == NULL) {
4058 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 4058 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
4059 ha->host_no, __func__)); 4059 ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 949e186cc5d7..a77bfb224248 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2704,9 +2704,9 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
2704 uint32_t rem = len; 2704 uint32_t rem = len;
2705 struct nlattr *attr; 2705 struct nlattr *attr;
2706 2706
2707 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 2707 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
2708 sizeof(struct addr_ctrl_blk), 2708 sizeof(struct addr_ctrl_blk),
2709 &init_fw_cb_dma, GFP_KERNEL); 2709 &init_fw_cb_dma, GFP_KERNEL);
2710 if (!init_fw_cb) { 2710 if (!init_fw_cb) {
2711 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2711 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
2712 __func__); 2712 __func__);
@@ -4206,8 +4206,8 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
4206 sizeof(struct shadow_regs) + 4206 sizeof(struct shadow_regs) +
4207 MEM_ALIGN_VALUE + 4207 MEM_ALIGN_VALUE +
4208 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4208 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4209 ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len, 4209 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
4210 &ha->queues_dma, GFP_KERNEL); 4210 &ha->queues_dma, GFP_KERNEL);
4211 if (ha->queues == NULL) { 4211 if (ha->queues == NULL) {
4212 ql4_printk(KERN_WARNING, ha, 4212 ql4_printk(KERN_WARNING, ha,
4213 "Memory Allocation failed - queues.\n"); 4213 "Memory Allocation failed - queues.\n");
@@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
7232 7232
7233 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7233 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
7234 fw_ddb_entry); 7234 fw_ddb_entry);
7235 if (rc)
7236 goto free_sess;
7235 7237
7236 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7238 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7237 __func__, fnode_sess->dev.kobj.name); 7239 __func__, fnode_sess->dev.kobj.name);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 661512bec3ac..e27f4df24021 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -62,7 +62,7 @@
62 62
63/* make sure inq_product_rev string corresponds to this version */ 63/* make sure inq_product_rev string corresponds to this version */
64#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ 64#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
65static const char *sdebug_version_date = "20180128"; 65static const char *sdebug_version_date = "20190125";
66 66
67#define MY_NAME "scsi_debug" 67#define MY_NAME "scsi_debug"
68 68
@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
735 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); 735 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
736} 736}
737 737
738static void *fake_store(unsigned long long lba) 738static void *lba2fake_store(unsigned long long lba)
739{ 739{
740 lba = do_div(lba, sdebug_store_sectors); 740 lba = do_div(lba, sdebug_store_sectors);
741 741
@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2514 return ret; 2514 return ret;
2515} 2515}
2516 2516
2517/* If fake_store(lba,num) compares equal to arr(num), then copy top half of 2517/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
2518 * arr into fake_store(lba,num) and return true. If comparison fails then 2518 * arr into lba2fake_store(lba,num) and return true. If comparison fails then
2519 * return false. */ 2519 * return false. */
2520static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) 2520static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2521{ 2521{
@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2643 if (sdt->app_tag == cpu_to_be16(0xffff)) 2643 if (sdt->app_tag == cpu_to_be16(0xffff))
2644 continue; 2644 continue;
2645 2645
2646 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); 2646 ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
2647 if (ret) { 2647 if (ret) {
2648 dif_errors++; 2648 dif_errors++;
2649 return ret; 2649 return ret;
@@ -3261,10 +3261,12 @@ err_out:
3261static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, 3261static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3262 u32 ei_lba, bool unmap, bool ndob) 3262 u32 ei_lba, bool unmap, bool ndob)
3263{ 3263{
3264 int ret;
3264 unsigned long iflags; 3265 unsigned long iflags;
3265 unsigned long long i; 3266 unsigned long long i;
3266 int ret; 3267 u32 lb_size = sdebug_sector_size;
3267 u64 lba_off; 3268 u64 block, lbaa;
3269 u8 *fs1p;
3268 3270
3269 ret = check_device_access_params(scp, lba, num); 3271 ret = check_device_access_params(scp, lba, num);
3270 if (ret) 3272 if (ret)
@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3276 unmap_region(lba, num); 3278 unmap_region(lba, num);
3277 goto out; 3279 goto out;
3278 } 3280 }
3279 3281 lbaa = lba;
3280 lba_off = lba * sdebug_sector_size; 3282 block = do_div(lbaa, sdebug_store_sectors);
3281 /* if ndob then zero 1 logical block, else fetch 1 logical block */ 3283 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3284 fs1p = fake_storep + (block * lb_size);
3282 if (ndob) { 3285 if (ndob) {
3283 memset(fake_storep + lba_off, 0, sdebug_sector_size); 3286 memset(fs1p, 0, lb_size);
3284 ret = 0; 3287 ret = 0;
3285 } else 3288 } else
3286 ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, 3289 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3287 sdebug_sector_size);
3288 3290
3289 if (-1 == ret) { 3291 if (-1 == ret) {
3290 write_unlock_irqrestore(&atomic_rw, iflags); 3292 write_unlock_irqrestore(&atomic_rw, iflags);
3291 return DID_ERROR << 16; 3293 return DID_ERROR << 16;
3292 } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) 3294 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3293 sdev_printk(KERN_INFO, scp->device, 3295 sdev_printk(KERN_INFO, scp->device,
3294 "%s: %s: lb size=%u, IO sent=%d bytes\n", 3296 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3295 my_name, "write same", 3297 my_name, "write same", lb_size, ret);
3296 sdebug_sector_size, ret);
3297 3298
3298 /* Copy first sector to remaining blocks */ 3299 /* Copy first sector to remaining blocks */
3299 for (i = 1 ; i < num ; i++) 3300 for (i = 1 ; i < num ; i++) {
3300 memcpy(fake_storep + ((lba + i) * sdebug_sector_size), 3301 lbaa = lba + i;
3301 fake_storep + lba_off, 3302 block = do_div(lbaa, sdebug_store_sectors);
3302 sdebug_sector_size); 3303 memmove(fake_storep + (block * lb_size), fs1p, lb_size);
3303 3304 }
3304 if (scsi_debug_lbp()) 3305 if (scsi_debug_lbp())
3305 map_region(lba, num); 3306 map_region(lba, num);
3306out: 3307out:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b13cc9288ba0..f8d51c3d5582 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -655,6 +655,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
655 set_host_byte(cmd, DID_OK); 655 set_host_byte(cmd, DID_OK);
656 return BLK_STS_TARGET; 656 return BLK_STS_TARGET;
657 case DID_NEXUS_FAILURE: 657 case DID_NEXUS_FAILURE:
658 set_host_byte(cmd, DID_OK);
658 return BLK_STS_NEXUS; 659 return BLK_STS_NEXUS;
659 case DID_ALLOC_FAILURE: 660 case DID_ALLOC_FAILURE:
660 set_host_byte(cmd, DID_OK); 661 set_host_byte(cmd, DID_OK);
@@ -1842,8 +1843,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1842 blk_queue_segment_boundary(q, shost->dma_boundary); 1843 blk_queue_segment_boundary(q, shost->dma_boundary);
1843 dma_set_seg_boundary(dev, shost->dma_boundary); 1844 dma_set_seg_boundary(dev, shost->dma_boundary);
1844 1845
1845 blk_queue_max_segment_size(q, 1846 blk_queue_max_segment_size(q, shost->max_segment_size);
1846 min(shost->max_segment_size, dma_get_max_seg_size(dev))); 1847 dma_set_max_seg_size(dev, shost->max_segment_size);
1847 1848
1848 /* 1849 /*
1849 * Set a reasonable default alignment: The larger of 32-byte (dword), 1850 * Set a reasonable default alignment: The larger of 32-byte (dword),
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index a2b4179bfdf7..7639df91b110 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev,
80 80
81 if (err == 0) { 81 if (err == 0) {
82 pm_runtime_disable(dev); 82 pm_runtime_disable(dev);
83 pm_runtime_set_active(dev); 83 err = pm_runtime_set_active(dev);
84 pm_runtime_enable(dev); 84 pm_runtime_enable(dev);
85
86 /*
87 * Forcibly set runtime PM status of request queue to "active"
88 * to make sure we can again get requests from the queue
89 * (see also blk_pm_peek_request()).
90 *
91 * The resume hook will correct runtime PM status of the disk.
92 */
93 if (!err && scsi_is_sdev_device(dev)) {
94 struct scsi_device *sdev = to_scsi_device(dev);
95
96 if (sdev->request_queue->dev)
97 blk_set_runtime_active(sdev->request_queue);
98 }
85 } 99 }
86 100
87 return err; 101 return err;
@@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev,
140 else 154 else
141 fn = NULL; 155 fn = NULL;
142 156
143 /*
144 * Forcibly set runtime PM status of request queue to "active" to
145 * make sure we can again get requests from the queue (see also
146 * blk_pm_peek_request()).
147 *
148 * The resume hook will correct runtime PM status of the disk.
149 */
150 if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
151 blk_set_runtime_active(to_scsi_device(dev)->request_queue);
152
153 if (fn) { 157 if (fn) {
154 async_schedule_domain(fn, dev, &scsi_sd_pm_domain); 158 async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
155 159
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a1a44f52e0e8..5464d467e23e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
206 sp = buffer_data[0] & 0x80 ? 1 : 0; 206 sp = buffer_data[0] & 0x80 ? 1 : 0;
207 buffer_data[0] &= ~0x80; 207 buffer_data[0] &= ~0x80;
208 208
209 /*
210 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
211 * received mode parameter buffer before doing MODE SELECT.
212 */
213 data.device_specific = 0;
214
209 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, 215 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
210 SD_MAX_RETRIES, &data, &sshdr)) { 216 SD_MAX_RETRIES, &data, &sshdr)) {
211 if (scsi_sense_valid(&sshdr)) 217 if (scsi_sense_valid(&sshdr))
@@ -2945,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2945 if (rot == 1) { 2951 if (rot == 1) {
2946 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2952 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2947 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2953 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2948 } else {
2949 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2950 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
2951 } 2954 }
2952 2955
2953 if (sdkp->device->type == TYPE_ZBC) { 2956 if (sdkp->device->type == TYPE_ZBC) {
@@ -3084,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
3084 if (sdkp->media_present) { 3087 if (sdkp->media_present) {
3085 sd_read_capacity(sdkp, buffer); 3088 sd_read_capacity(sdkp, buffer);
3086 3089
3090 /*
3091 * set the default to rotational. All non-rotational devices
3092 * support the block characteristics VPD page, which will
3093 * cause this to be updated correctly and any device which
3094 * doesn't support it should be treated as rotational.
3095 */
3096 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
3097 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
3098
3087 if (scsi_device_supports_vpd(sdp)) { 3099 if (scsi_device_supports_vpd(sdp)) {
3088 sd_read_block_provisioning(sdkp); 3100 sd_read_block_provisioning(sdkp);
3089 sd_read_block_limits(sdkp); 3101 sd_read_block_limits(sdkp);
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 83365b29a4d8..a340af797a85 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
142 return -EOPNOTSUPP; 142 return -EOPNOTSUPP;
143 143
144 /* 144 /*
145 * Get a reply buffer for the number of requested zones plus a header. 145 * Get a reply buffer for the number of requested zones plus a header,
146 * For ATA, buffers must be aligned to 512B. 146 * without exceeding the device maximum command size. For ATA disks,
147 * buffers must be aligned to 512B.
147 */ 148 */
148 buflen = roundup((nrz + 1) * 64, 512); 149 buflen = min(queue_max_hw_sectors(disk->queue) << 9,
150 roundup((nrz + 1) * 64, 512));
149 buf = kmalloc(buflen, gfp_mask); 151 buf = kmalloc(buflen, gfp_mask);
150 if (!buf) 152 if (!buf)
151 return -ENOMEM; 153 return -ENOMEM;
@@ -462,12 +464,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
462 sdkp->device->use_10_for_rw = 0; 464 sdkp->device->use_10_for_rw = 0;
463 465
464 /* 466 /*
465 * If something changed, revalidate the disk zone bitmaps once we have 467 * Revalidate the disk zone bitmaps once the block device capacity is
466 * the capacity, that is on the second revalidate execution during disk 468 * set on the second revalidate execution during disk scan and if
467 * scan and always during normal revalidate. 469 * something changed when executing a normal revalidate.
468 */ 470 */
469 if (sdkp->first_scan) 471 if (sdkp->first_scan) {
472 sdkp->zone_blocks = zone_blocks;
473 sdkp->nr_zones = nr_zones;
470 return 0; 474 return 0;
475 }
476
471 if (sdkp->zone_blocks != zone_blocks || 477 if (sdkp->zone_blocks != zone_blocks ||
472 sdkp->nr_zones != nr_zones || 478 sdkp->nr_zones != nr_zones ||
473 disk->queue->nr_zones != nr_zones) { 479 disk->queue->nr_zones != nr_zones) {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index e2fa3f476227..f564af8949e8 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
323static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, 323static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
324 struct pqi_scsi_dev *device) 324 struct pqi_scsi_dev *device)
325{ 325{
326 return device->in_remove & !ctrl_info->in_shutdown; 326 return device->in_remove && !ctrl_info->in_shutdown;
327} 327}
328 328
329static inline void pqi_schedule_rescan_worker_with_delay( 329static inline void pqi_schedule_rescan_worker_with_delay(
@@ -3576,9 +3576,9 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3576 alloc_length += PQI_EXTRA_SGL_MEMORY; 3576 alloc_length += PQI_EXTRA_SGL_MEMORY;
3577 3577
3578 ctrl_info->queue_memory_base = 3578 ctrl_info->queue_memory_base =
3579 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3579 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3580 alloc_length, 3580 &ctrl_info->queue_memory_base_dma_handle,
3581 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); 3581 GFP_KERNEL);
3582 3582
3583 if (!ctrl_info->queue_memory_base) 3583 if (!ctrl_info->queue_memory_base)
3584 return -ENOMEM; 3584 return -ENOMEM;
@@ -3715,10 +3715,9 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3715 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3715 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3716 3716
3717 ctrl_info->admin_queue_memory_base = 3717 ctrl_info->admin_queue_memory_base =
3718 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3718 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3719 alloc_length, 3719 &ctrl_info->admin_queue_memory_base_dma_handle,
3720 &ctrl_info->admin_queue_memory_base_dma_handle, 3720 GFP_KERNEL);
3721 GFP_KERNEL);
3722 3721
3723 if (!ctrl_info->admin_queue_memory_base) 3722 if (!ctrl_info->admin_queue_memory_base)
3724 return -ENOMEM; 3723 return -ENOMEM;
@@ -4602,9 +4601,10 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4602 4601
4603static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4602static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4604{ 4603{
4605 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 4604 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4606 ctrl_info->error_buffer_length, 4605 ctrl_info->error_buffer_length,
4607 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); 4606 &ctrl_info->error_buffer_dma_handle,
4607 GFP_KERNEL);
4608 4608
4609 if (!ctrl_info->error_buffer) 4609 if (!ctrl_info->error_buffer)
4610 return -ENOMEM; 4610 return -ENOMEM;
@@ -7487,8 +7487,8 @@ static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7487 dma_addr_t dma_handle; 7487 dma_addr_t dma_handle;
7488 7488
7489 ctrl_info->pqi_ofa_chunk_virt_addr[i] = 7489 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7490 dma_zalloc_coherent(dev, chunk_size, &dma_handle, 7490 dma_alloc_coherent(dev, chunk_size, &dma_handle,
7491 GFP_KERNEL); 7491 GFP_KERNEL);
7492 7492
7493 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) 7493 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7494 break; 7494 break;
@@ -7545,10 +7545,10 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7545 struct device *dev; 7545 struct device *dev;
7546 7546
7547 dev = &ctrl_info->pci_dev->dev; 7547 dev = &ctrl_info->pci_dev->dev;
7548 pqi_ofa_memory = dma_zalloc_coherent(dev, 7548 pqi_ofa_memory = dma_alloc_coherent(dev,
7549 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, 7549 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7550 &ctrl_info->pqi_ofa_mem_dma_handle, 7550 &ctrl_info->pqi_ofa_mem_dma_handle,
7551 GFP_KERNEL); 7551 GFP_KERNEL);
7552 7552
7553 if (!pqi_ofa_memory) 7553 if (!pqi_ofa_memory)
7554 return; 7554 return;
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index dd65fea07687..6d176815e6ce 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -195,7 +195,7 @@ enum ufs_desc_def_size {
195 QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, 195 QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
196 QUERY_DESC_UNIT_DEF_SIZE = 0x23, 196 QUERY_DESC_UNIT_DEF_SIZE = 0x23,
197 QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, 197 QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
198 QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, 198 QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
199 QUERY_DESC_POWER_DEF_SIZE = 0x62, 199 QUERY_DESC_POWER_DEF_SIZE = 0x62,
200 QUERY_DESC_HEALTH_DEF_SIZE = 0x25, 200 QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
201}; 201};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9ba7671b84f8..2ddf24466a62 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -108,13 +108,19 @@
108int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, 108int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
109 const char *prefix) 109 const char *prefix)
110{ 110{
111 u8 *regs; 111 u32 *regs;
112 size_t pos;
113
114 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
115 return -EINVAL;
112 116
113 regs = kzalloc(len, GFP_KERNEL); 117 regs = kzalloc(len, GFP_KERNEL);
114 if (!regs) 118 if (!regs)
115 return -ENOMEM; 119 return -ENOMEM;
116 120
117 memcpy_fromio(regs, hba->mmio_base + offset, len); 121 for (pos = 0; pos < len; pos += 4)
122 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
123
118 ufshcd_hex_dump(prefix, regs, len); 124 ufshcd_hex_dump(prefix, regs, len);
119 kfree(regs); 125 kfree(regs);
120 126
@@ -8001,6 +8007,8 @@ out:
8001 trace_ufshcd_system_resume(dev_name(hba->dev), ret, 8007 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8002 ktime_to_us(ktime_sub(ktime_get(), start)), 8008 ktime_to_us(ktime_sub(ktime_get(), start)),
8003 hba->curr_dev_pwr_mode, hba->uic_link_state); 8009 hba->curr_dev_pwr_mode, hba->uic_link_state);
8010 if (!ret)
8011 hba->is_sys_suspended = false;
8004 return ret; 8012 return ret;
8005} 8013}
8006EXPORT_SYMBOL(ufshcd_system_resume); 8014EXPORT_SYMBOL(ufshcd_system_resume);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 9436aa83ff1b..e6d48dccb8d5 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -62,7 +62,7 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
62 return -ENODEV; 62 return -ENODEV;
63 } 63 }
64 64
65 if (!dma_zalloc_coherent(dev, *size, addr, 0)) { 65 if (!dma_alloc_coherent(dev, *size, addr, 0)) {
66 dev_err(dev, "DMA Alloc memory failed\n"); 66 dev_err(dev, "DMA Alloc memory failed\n");
67 return -ENODEV; 67 return -ENODEV;
68 } 68 }
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 52c153cd795a..636f83f781f5 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work);
1143static irqreturn_t portal_isr(int irq, void *ptr) 1143static irqreturn_t portal_isr(int irq, void *ptr)
1144{ 1144{
1145 struct qman_portal *p = ptr; 1145 struct qman_portal *p = ptr;
1146
1147 u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
1148 u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; 1146 u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
1147 u32 clear = 0;
1149 1148
1150 if (unlikely(!is)) 1149 if (unlikely(!is))
1151 return IRQ_NONE; 1150 return IRQ_NONE;
1152 1151
1153 /* DQRR-handling if it's interrupt-driven */ 1152 /* DQRR-handling if it's interrupt-driven */
1154 if (is & QM_PIRQ_DQRI) 1153 if (is & QM_PIRQ_DQRI) {
1155 __poll_portal_fast(p, QMAN_POLL_LIMIT); 1154 __poll_portal_fast(p, QMAN_POLL_LIMIT);
1155 clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
1156 }
1156 /* Handling of anything else that's interrupt-driven */ 1157 /* Handling of anything else that's interrupt-driven */
1157 clear |= __poll_portal_slow(p, is); 1158 clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
1158 qm_out(&p->p, QM_REG_ISR, clear); 1159 qm_out(&p->p, QM_REG_ISR, clear);
1159 return IRQ_HANDLED; 1160 return IRQ_HANDLED;
1160} 1161}
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index f78c34647ca2..76480df195a8 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
44 const char *sprop; 44 const char *sprop;
45 int ret = 0; 45 int ret = 0;
46 u32 val; 46 u32 val;
47 struct resource *res;
48 struct device_node *np2;
49 static int siram_init_flag;
50 struct platform_device *pdev;
51 47
52 sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); 48 sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
53 if (sprop) { 49 if (sprop) {
@@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
124 utdm->siram_entry_id = val; 120 utdm->siram_entry_id = val;
125 121
126 set_si_param(utdm, ut_info); 122 set_si_param(utdm, ut_info);
127
128 np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
129 if (!np2)
130 return -EINVAL;
131
132 pdev = of_find_device_by_node(np2);
133 if (!pdev) {
134 pr_err("%pOFn: failed to lookup pdev\n", np2);
135 of_node_put(np2);
136 return -EINVAL;
137 }
138
139 of_node_put(np2);
140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
141 utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
142 if (IS_ERR(utdm->si_regs)) {
143 ret = PTR_ERR(utdm->si_regs);
144 goto err_miss_siram_property;
145 }
146
147 np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
148 if (!np2) {
149 ret = -EINVAL;
150 goto err_miss_siram_property;
151 }
152
153 pdev = of_find_device_by_node(np2);
154 if (!pdev) {
155 ret = -EINVAL;
156 pr_err("%pOFn: failed to lookup pdev\n", np2);
157 of_node_put(np2);
158 goto err_miss_siram_property;
159 }
160
161 of_node_put(np2);
162 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
163 utdm->siram = devm_ioremap_resource(&pdev->dev, res);
164 if (IS_ERR(utdm->siram)) {
165 ret = PTR_ERR(utdm->siram);
166 goto err_miss_siram_property;
167 }
168
169 if (siram_init_flag == 0) {
170 memset_io(utdm->siram, 0, resource_size(res));
171 siram_init_flag = 1;
172 }
173
174 return ret;
175
176err_miss_siram_property:
177 devm_iounmap(&pdev->dev, utdm->si_regs);
178 return ret; 123 return ret;
179} 124}
180EXPORT_SYMBOL(ucc_of_parse_tdm); 125EXPORT_SYMBOL(ucc_of_parse_tdm);
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 4d8012e1205c..68bfca6f20dd 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -44,7 +44,7 @@ config ARCH_RZN1
44 bool 44 bool
45 select ARM_AMBA 45 select ARM_AMBA
46 46
47if ARM 47if ARM && ARCH_RENESAS
48 48
49#comment "Renesas ARM SoCs System Type" 49#comment "Renesas ARM SoCs System Type"
50 50
diff --git a/drivers/soc/renesas/r8a774c0-sysc.c b/drivers/soc/renesas/r8a774c0-sysc.c
index e1ac4c0f6640..11050e17ea81 100644
--- a/drivers/soc/renesas/r8a774c0-sysc.c
+++ b/drivers/soc/renesas/r8a774c0-sysc.c
@@ -28,19 +28,6 @@ static struct rcar_sysc_area r8a774c0_areas[] __initdata = {
28 { "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A }, 28 { "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A },
29}; 29};
30 30
31static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas,
32 unsigned int num_areas, u8 id,
33 int new_parent)
34{
35 unsigned int i;
36
37 for (i = 0; i < num_areas; i++)
38 if (areas[i].isr_bit == id) {
39 areas[i].parent = new_parent;
40 return;
41 }
42}
43
44/* Fixups for RZ/G2E ES1.0 revision */ 31/* Fixups for RZ/G2E ES1.0 revision */
45static const struct soc_device_attribute r8a774c0[] __initconst = { 32static const struct soc_device_attribute r8a774c0[] __initconst = {
46 { .soc_id = "r8a774c0", .revision = "ES1.0" }, 33 { .soc_id = "r8a774c0", .revision = "ES1.0" },
@@ -50,12 +37,10 @@ static const struct soc_device_attribute r8a774c0[] __initconst = {
50static int __init r8a774c0_sysc_init(void) 37static int __init r8a774c0_sysc_init(void)
51{ 38{
52 if (soc_device_match(r8a774c0)) { 39 if (soc_device_match(r8a774c0)) {
53 rcar_sysc_fix_parent(r8a774c0_areas, 40 /* Fix incorrect 3DG hierarchy */
54 ARRAY_SIZE(r8a774c0_areas), 41 swap(r8a774c0_areas[6], r8a774c0_areas[7]);
55 R8A774C0_PD_3DG_A, R8A774C0_PD_3DG_B); 42 r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON;
56 rcar_sysc_fix_parent(r8a774c0_areas, 43 r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B;
57 ARRAY_SIZE(r8a774c0_areas),
58 R8A774C0_PD_3DG_B, R8A774C0_PD_ALWAYS_ON);
59 } 44 }
60 45
61 return 0; 46 return 0;
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index d7e4e18ec3df..1ae9af5f17ec 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -466,9 +466,9 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
466 int i; 466 int i;
467 467
468 /* allocate coherent DMAable memory for hardware buffer descriptors. */ 468 /* allocate coherent DMAable memory for hardware buffer descriptors. */
469 sqi->bd = dma_zalloc_coherent(&sqi->master->dev, 469 sqi->bd = dma_alloc_coherent(&sqi->master->dev,
470 sizeof(*bd) * PESQI_BD_COUNT, 470 sizeof(*bd) * PESQI_BD_COUNT,
471 &sqi->bd_dma, GFP_KERNEL); 471 &sqi->bd_dma, GFP_KERNEL);
472 if (!sqi->bd) { 472 if (!sqi->bd) {
473 dev_err(&sqi->master->dev, "failed allocating dma buffer\n"); 473 dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
474 return -ENOMEM; 474 return -ENOMEM;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a0802de8c3a1..6f5afab7c1a1 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -248,10 +248,10 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
248 struct ion_dma_buf_attachment *a = attachment->priv; 248 struct ion_dma_buf_attachment *a = attachment->priv;
249 struct ion_buffer *buffer = dmabuf->priv; 249 struct ion_buffer *buffer = dmabuf->priv;
250 250
251 free_duped_table(a->table);
252 mutex_lock(&buffer->lock); 251 mutex_lock(&buffer->lock);
253 list_del(&a->list); 252 list_del(&a->list);
254 mutex_unlock(&buffer->lock); 253 mutex_unlock(&buffer->lock);
254 free_duped_table(a->table);
255 255
256 kfree(a); 256 kfree(a);
257} 257}
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
index 21a76a8ccc26..6027b19f7bc2 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c
@@ -1396,8 +1396,7 @@ static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
1396 if (!ring->tx_buf) 1396 if (!ring->tx_buf)
1397 goto no_tx_mem; 1397 goto no_tx_mem;
1398 1398
1399 ring->tx_dma = dma_zalloc_coherent(eth->dev, 1399 ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
1400 ring->tx_ring_size * sz,
1401 &ring->tx_phys, 1400 &ring->tx_phys,
1402 GFP_ATOMIC | __GFP_ZERO); 1401 GFP_ATOMIC | __GFP_ZERO);
1403 if (!ring->tx_dma) 1402 if (!ring->tx_dma)
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 2848fa71a33d..d6248eecf123 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -170,7 +170,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
170 return -ENODEV; 170 return -ENODEV;
171 171
172 priv->last_link = 0; 172 priv->last_link = 0;
173 phy_start_aneg(phydev); 173 phy_start(phydev);
174 174
175 return 0; 175 return 0;
176no_phy: 176no_phy:
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 364d6ea14bf8..2f90f60f1681 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -154,7 +154,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
154 154
155 pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset; 155 pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
156 156
157 crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); 157 crypto_ops = lib80211_get_crypto_ops("WEP");
158 158
159 if (!crypto_ops) 159 if (!crypto_ops)
160 return; 160 return;
@@ -210,7 +210,7 @@ int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
210 void *crypto_private = NULL; 210 void *crypto_private = NULL;
211 int status = _SUCCESS; 211 int status = _SUCCESS;
212 const int keyindex = prxattrib->key_index; 212 const int keyindex = prxattrib->key_index;
213 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); 213 struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP");
214 char iv[4], icv[4]; 214 char iv[4], icv[4];
215 215
216 if (!crypto_ops) { 216 if (!crypto_ops) {
@@ -1291,7 +1291,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
1291 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt; 1291 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
1292 void *crypto_private = NULL; 1292 void *crypto_private = NULL;
1293 u8 *key, *pframe = skb->data; 1293 u8 *key, *pframe = skb->data;
1294 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp"); 1294 struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP");
1295 struct security_priv *psecuritypriv = &padapter->securitypriv; 1295 struct security_priv *psecuritypriv = &padapter->securitypriv;
1296 char iv[8], icv[8]; 1296 char iv[8], icv[8];
1297 1297
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 28cbd6b3d26c..dfee6985efa6 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
35 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 35 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
36 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 36 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
37 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ 37 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
38 {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
38 {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ 39 {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
39 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ 40 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
40 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ 41 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index bcc8dfa8e672..9efb4dcb9d3a 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -850,18 +850,18 @@ enum ieee80211_state {
850#define IP_FMT "%pI4" 850#define IP_FMT "%pI4"
851#define IP_ARG(x) (x) 851#define IP_ARG(x) (x)
852 852
853extern __inline int is_multicast_mac_addr(const u8 *addr) 853static inline int is_multicast_mac_addr(const u8 *addr)
854{ 854{
855 return ((addr[0] != 0xff) && (0x01 & addr[0])); 855 return ((addr[0] != 0xff) && (0x01 & addr[0]));
856} 856}
857 857
858extern __inline int is_broadcast_mac_addr(const u8 *addr) 858static inline int is_broadcast_mac_addr(const u8 *addr)
859{ 859{
860 return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \ 860 return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
861 (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff)); 861 (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
862} 862}
863 863
864extern __inline int is_zero_mac_addr(const u8 *addr) 864static inline int is_zero_mac_addr(const u8 *addr)
865{ 865{
866 return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \ 866 return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
867 (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00)); 867 (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 7c03b69b8ed3..6d02904de63f 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -22,7 +22,7 @@ static const struct sdio_device_id sdio_ids[] =
22 { SDIO_DEVICE(0x024c, 0xb723), }, 22 { SDIO_DEVICE(0x024c, 0xb723), },
23 { /* end: all zeroes */ }, 23 { /* end: all zeroes */ },
24}; 24};
25static const struct acpi_device_id acpi_ids[] __used = { 25static const struct acpi_device_id acpi_ids[] = {
26 {"OBDA8723", 0x0000}, 26 {"OBDA8723", 0x0000},
27 {} 27 {}
28}; 28};
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index c92bbd05516e..005de0024dd4 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch)
265 return; 265 return;
266 } 266 }
267 267
268 speakup_tty->ops->send_xchar(speakup_tty, ch); 268 if (speakup_tty->ops->send_xchar)
269 speakup_tty->ops->send_xchar(speakup_tty, ch);
269 mutex_unlock(&speakup_tty_mutex); 270 mutex_unlock(&speakup_tty_mutex);
270} 271}
271 272
@@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
277 return; 278 return;
278 } 279 }
279 280
280 speakup_tty->ops->tiocmset(speakup_tty, set, clear); 281 if (speakup_tty->ops->tiocmset)
282 speakup_tty->ops->tiocmset(speakup_tty, set, clear);
281 mutex_unlock(&speakup_tty_mutex); 283 mutex_unlock(&speakup_tty_mutex);
282} 284}
283 285
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 338b6e952515..dd4898861b83 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -407,10 +407,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
407 /* Allocate enough storage to hold the page pointers and the page 407 /* Allocate enough storage to hold the page pointers and the page
408 * list 408 * list
409 */ 409 */
410 pagelist = dma_zalloc_coherent(g_dev, 410 pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
411 pagelist_size, 411 GFP_KERNEL);
412 &dma_addr,
413 GFP_KERNEL);
414 412
415 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist); 413 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
416 414
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 9e17ec651bde..53f5a1cb4636 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -446,6 +446,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
446static inline void 446static inline void
447remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) 447remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
448{ 448{
449 event->fired = 1;
449 event->armed = 0; 450 event->armed = 0;
450 wake_up_all(wq); 451 wake_up_all(wq);
451} 452}
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1ab0e8562d40..c9097e7367d8 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -440,12 +440,9 @@ static bool device_init_rings(struct vnt_private *priv)
440 void *vir_pool; 440 void *vir_pool;
441 441
442 /*allocate all RD/TD rings a single pool*/ 442 /*allocate all RD/TD rings a single pool*/
443 vir_pool = dma_zalloc_coherent(&priv->pcid->dev, 443 vir_pool = dma_alloc_coherent(&priv->pcid->dev,
444 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + 444 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
445 priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + 445 &priv->pool_dma, GFP_ATOMIC);
446 priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
447 priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
448 &priv->pool_dma, GFP_ATOMIC);
449 if (!vir_pool) { 446 if (!vir_pool) {
450 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n"); 447 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
451 return false; 448 return false;
@@ -459,13 +456,9 @@ static bool device_init_rings(struct vnt_private *priv)
459 priv->rd1_pool_dma = priv->rd0_pool_dma + 456 priv->rd1_pool_dma = priv->rd0_pool_dma +
460 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc); 457 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
461 458
462 priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev, 459 priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
463 priv->opts.tx_descs[0] * PKT_BUF_SZ + 460 priv->opts.tx_descs[0] * PKT_BUF_SZ + priv->opts.tx_descs[1] * PKT_BUF_SZ + CB_BEACON_BUF_SIZE + CB_MAX_BUF_SIZE,
464 priv->opts.tx_descs[1] * PKT_BUF_SZ + 461 &priv->tx_bufs_dma0, GFP_ATOMIC);
465 CB_BEACON_BUF_SIZE +
466 CB_MAX_BUF_SIZE,
467 &priv->tx_bufs_dma0,
468 GFP_ATOMIC);
469 if (!priv->tx0_bufs) { 462 if (!priv->tx0_bufs) {
470 dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n"); 463 dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");
471 464
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 70c854d939ce..3d0badc34825 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -36,7 +36,7 @@ struct wilc_op_mode {
36struct wilc_reg_frame { 36struct wilc_reg_frame {
37 bool reg; 37 bool reg;
38 u8 reg_id; 38 u8 reg_id;
39 __le32 frame_type; 39 __le16 frame_type;
40} __packed; 40} __packed;
41 41
42struct wilc_drv_handler { 42struct wilc_drv_handler {
@@ -1744,7 +1744,6 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
1744 result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, 1744 result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
1745 ARRAY_SIZE(wid_list), 1745 ARRAY_SIZE(wid_list),
1746 wilc_get_vif_idx(vif)); 1746 wilc_get_vif_idx(vif));
1747 kfree(gtk_key);
1748 } else if (mode == WILC_STATION_MODE) { 1747 } else if (mode == WILC_STATION_MODE) {
1749 struct wid wid; 1748 struct wid wid;
1750 1749
@@ -1754,9 +1753,9 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
1754 wid.val = (u8 *)gtk_key; 1753 wid.val = (u8 *)gtk_key;
1755 result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1, 1754 result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
1756 wilc_get_vif_idx(vif)); 1755 wilc_get_vif_idx(vif));
1757 kfree(gtk_key);
1758 } 1756 }
1759 1757
1758 kfree(gtk_key);
1760 return result; 1759 return result;
1761} 1760}
1762 1761
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 3c5e9e030cad..489e5a5038f8 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -1252,21 +1252,22 @@ static u32 init_chip(struct net_device *dev)
1252 ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg); 1252 ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
1253 if (!ret) { 1253 if (!ret) {
1254 netdev_err(dev, "fail read reg 0x1118\n"); 1254 netdev_err(dev, "fail read reg 0x1118\n");
1255 return ret; 1255 goto release;
1256 } 1256 }
1257 reg |= BIT(0); 1257 reg |= BIT(0);
1258 ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg); 1258 ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
1259 if (!ret) { 1259 if (!ret) {
1260 netdev_err(dev, "fail write reg 0x1118\n"); 1260 netdev_err(dev, "fail write reg 0x1118\n");
1261 return ret; 1261 goto release;
1262 } 1262 }
1263 ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71); 1263 ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
1264 if (!ret) { 1264 if (!ret) {
1265 netdev_err(dev, "fail write reg 0xc0000\n"); 1265 netdev_err(dev, "fail write reg 0xc0000\n");
1266 return ret; 1266 goto release;
1267 } 1267 }
1268 } 1268 }
1269 1269
1270release:
1270 release_bus(wilc, WILC_BUS_RELEASE_ONLY); 1271 release_bus(wilc, WILC_BUS_RELEASE_ONLY);
1271 1272
1272 return ret; 1273 return ret;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 984941e036c8..bd15a564fe24 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void)
714 sizeof(struct iscsi_queue_req), 714 sizeof(struct iscsi_queue_req),
715 __alignof__(struct iscsi_queue_req), 0, NULL); 715 __alignof__(struct iscsi_queue_req), 0, NULL);
716 if (!lio_qr_cache) { 716 if (!lio_qr_cache) {
717 pr_err("nable to kmem_cache_create() for" 717 pr_err("Unable to kmem_cache_create() for"
718 " lio_qr_cache\n"); 718 " lio_qr_cache\n");
719 goto bitmap_out; 719 goto bitmap_out;
720 } 720 }
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 72016d0dfca5..8e7fffbb8802 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -852,6 +852,12 @@ static ssize_t pi_prot_type_store(struct config_item *item,
852 return count; 852 return count;
853} 853}
854 854
855/* always zero, but attr needs to remain RW to avoid userspace breakage */
856static ssize_t pi_prot_format_show(struct config_item *item, char *page)
857{
858 return snprintf(page, PAGE_SIZE, "0\n");
859}
860
855static ssize_t pi_prot_format_store(struct config_item *item, 861static ssize_t pi_prot_format_store(struct config_item *item,
856 const char *page, size_t count) 862 const char *page, size_t count)
857{ 863{
@@ -1132,7 +1138,7 @@ CONFIGFS_ATTR(, emulate_3pc);
1132CONFIGFS_ATTR(, emulate_pr); 1138CONFIGFS_ATTR(, emulate_pr);
1133CONFIGFS_ATTR(, pi_prot_type); 1139CONFIGFS_ATTR(, pi_prot_type);
1134CONFIGFS_ATTR_RO(, hw_pi_prot_type); 1140CONFIGFS_ATTR_RO(, hw_pi_prot_type);
1135CONFIGFS_ATTR_WO(, pi_prot_format); 1141CONFIGFS_ATTR(, pi_prot_format);
1136CONFIGFS_ATTR(, pi_prot_verify); 1142CONFIGFS_ATTR(, pi_prot_verify);
1137CONFIGFS_ATTR(, enforce_pr_isids); 1143CONFIGFS_ATTR(, enforce_pr_isids);
1138CONFIGFS_ATTR(, is_nonrot); 1144CONFIGFS_ATTR(, is_nonrot);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 1e6d24943565..5831e0eecea1 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -148,7 +148,7 @@ struct tcmu_dev {
148 size_t ring_size; 148 size_t ring_size;
149 149
150 struct mutex cmdr_lock; 150 struct mutex cmdr_lock;
151 struct list_head cmdr_queue; 151 struct list_head qfull_queue;
152 152
153 uint32_t dbi_max; 153 uint32_t dbi_max;
154 uint32_t dbi_thresh; 154 uint32_t dbi_thresh;
@@ -159,6 +159,7 @@ struct tcmu_dev {
159 159
160 struct timer_list cmd_timer; 160 struct timer_list cmd_timer;
161 unsigned int cmd_time_out; 161 unsigned int cmd_time_out;
162 struct list_head inflight_queue;
162 163
163 struct timer_list qfull_timer; 164 struct timer_list qfull_timer;
164 int qfull_time_out; 165 int qfull_time_out;
@@ -179,7 +180,7 @@ struct tcmu_dev {
179struct tcmu_cmd { 180struct tcmu_cmd {
180 struct se_cmd *se_cmd; 181 struct se_cmd *se_cmd;
181 struct tcmu_dev *tcmu_dev; 182 struct tcmu_dev *tcmu_dev;
182 struct list_head cmdr_queue_entry; 183 struct list_head queue_entry;
183 184
184 uint16_t cmd_id; 185 uint16_t cmd_id;
185 186
@@ -192,6 +193,7 @@ struct tcmu_cmd {
192 unsigned long deadline; 193 unsigned long deadline;
193 194
194#define TCMU_CMD_BIT_EXPIRED 0 195#define TCMU_CMD_BIT_EXPIRED 0
196#define TCMU_CMD_BIT_INFLIGHT 1
195 unsigned long flags; 197 unsigned long flags;
196}; 198};
197/* 199/*
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
586 if (!tcmu_cmd) 588 if (!tcmu_cmd)
587 return NULL; 589 return NULL;
588 590
589 INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); 591 INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
590 tcmu_cmd->se_cmd = se_cmd; 592 tcmu_cmd->se_cmd = se_cmd;
591 tcmu_cmd->tcmu_dev = udev; 593 tcmu_cmd->tcmu_dev = udev;
592 594
@@ -915,11 +917,13 @@ setup_timer:
915 return 0; 917 return 0;
916 918
917 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 919 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
918 mod_timer(timer, tcmu_cmd->deadline); 920 if (!timer_pending(timer))
921 mod_timer(timer, tcmu_cmd->deadline);
922
919 return 0; 923 return 0;
920} 924}
921 925
922static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) 926static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
923{ 927{
924 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 928 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
925 unsigned int tmo; 929 unsigned int tmo;
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
942 if (ret) 946 if (ret)
943 return ret; 947 return ret;
944 948
945 list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); 949 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
946 pr_debug("adding cmd %u on dev %s to ring space wait queue\n", 950 pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
947 tcmu_cmd->cmd_id, udev->name); 951 tcmu_cmd->cmd_id, udev->name);
948 return 0; 952 return 0;
@@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
999 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); 1003 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
1000 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 1004 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1001 1005
1002 if (!list_empty(&udev->cmdr_queue)) 1006 if (!list_empty(&udev->qfull_queue))
1003 goto queue; 1007 goto queue;
1004 1008
1005 mb = udev->mb_addr; 1009 mb = udev->mb_addr;
@@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
1096 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 1100 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1097 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1101 tcmu_flush_dcache_range(mb, sizeof(*mb));
1098 1102
1103 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1104 set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
1105
1099 /* TODO: only if FLUSH and FUA? */ 1106 /* TODO: only if FLUSH and FUA? */
1100 uio_event_notify(&udev->uio_info); 1107 uio_event_notify(&udev->uio_info);
1101 1108
1102 return 0; 1109 return 0;
1103 1110
1104queue: 1111queue:
1105 if (add_to_cmdr_queue(tcmu_cmd)) { 1112 if (add_to_qfull_queue(tcmu_cmd)) {
1106 *scsi_err = TCM_OUT_OF_RESOURCES; 1113 *scsi_err = TCM_OUT_OF_RESOURCES;
1107 return -1; 1114 return -1;
1108 } 1115 }
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1145 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1152 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
1146 goto out; 1153 goto out;
1147 1154
1155 list_del_init(&cmd->queue_entry);
1156
1148 tcmu_cmd_reset_dbi_cur(cmd); 1157 tcmu_cmd_reset_dbi_cur(cmd);
1149 1158
1150 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 1159 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
@@ -1194,9 +1203,29 @@ out:
1194 tcmu_free_cmd(cmd); 1203 tcmu_free_cmd(cmd);
1195} 1204}
1196 1205
1206static void tcmu_set_next_deadline(struct list_head *queue,
1207 struct timer_list *timer)
1208{
1209 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1210 unsigned long deadline = 0;
1211
1212 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
1213 if (!time_after(jiffies, tcmu_cmd->deadline)) {
1214 deadline = tcmu_cmd->deadline;
1215 break;
1216 }
1217 }
1218
1219 if (deadline)
1220 mod_timer(timer, deadline);
1221 else
1222 del_timer(timer);
1223}
1224
1197static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 1225static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1198{ 1226{
1199 struct tcmu_mailbox *mb; 1227 struct tcmu_mailbox *mb;
1228 struct tcmu_cmd *cmd;
1200 int handled = 0; 1229 int handled = 0;
1201 1230
1202 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 1231 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1210 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { 1239 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1211 1240
1212 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; 1241 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
1213 struct tcmu_cmd *cmd;
1214 1242
1215 tcmu_flush_dcache_range(entry, sizeof(*entry)); 1243 tcmu_flush_dcache_range(entry, sizeof(*entry));
1216 1244
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1243 /* no more pending commands */ 1271 /* no more pending commands */
1244 del_timer(&udev->cmd_timer); 1272 del_timer(&udev->cmd_timer);
1245 1273
1246 if (list_empty(&udev->cmdr_queue)) { 1274 if (list_empty(&udev->qfull_queue)) {
1247 /* 1275 /*
1248 * no more pending or waiting commands so try to 1276 * no more pending or waiting commands so try to
1249 * reclaim blocks if needed. 1277 * reclaim blocks if needed.
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1252 tcmu_global_max_blocks) 1280 tcmu_global_max_blocks)
1253 schedule_delayed_work(&tcmu_unmap_work, 0); 1281 schedule_delayed_work(&tcmu_unmap_work, 0);
1254 } 1282 }
1283 } else if (udev->cmd_time_out) {
1284 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1255 } 1285 }
1256 1286
1257 return handled; 1287 return handled;
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
1271 if (!time_after(jiffies, cmd->deadline)) 1301 if (!time_after(jiffies, cmd->deadline))
1272 return 0; 1302 return 0;
1273 1303
1274 is_running = list_empty(&cmd->cmdr_queue_entry); 1304 is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
1275 se_cmd = cmd->se_cmd; 1305 se_cmd = cmd->se_cmd;
1276 1306
1277 if (is_running) { 1307 if (is_running) {
@@ -1287,9 +1317,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
1287 * target_complete_cmd will translate this to LUN COMM FAILURE 1317 * target_complete_cmd will translate this to LUN COMM FAILURE
1288 */ 1318 */
1289 scsi_status = SAM_STAT_CHECK_CONDITION; 1319 scsi_status = SAM_STAT_CHECK_CONDITION;
1320 list_del_init(&cmd->queue_entry);
1290 } else { 1321 } else {
1291 list_del_init(&cmd->cmdr_queue_entry); 1322 list_del_init(&cmd->queue_entry);
1292
1293 idr_remove(&udev->commands, id); 1323 idr_remove(&udev->commands, id);
1294 tcmu_free_cmd(cmd); 1324 tcmu_free_cmd(cmd);
1295 scsi_status = SAM_STAT_TASK_SET_FULL; 1325 scsi_status = SAM_STAT_TASK_SET_FULL;
@@ -1372,7 +1402,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1372 1402
1373 INIT_LIST_HEAD(&udev->node); 1403 INIT_LIST_HEAD(&udev->node);
1374 INIT_LIST_HEAD(&udev->timedout_entry); 1404 INIT_LIST_HEAD(&udev->timedout_entry);
1375 INIT_LIST_HEAD(&udev->cmdr_queue); 1405 INIT_LIST_HEAD(&udev->qfull_queue);
1406 INIT_LIST_HEAD(&udev->inflight_queue);
1376 idr_init(&udev->commands); 1407 idr_init(&udev->commands);
1377 1408
1378 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); 1409 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
@@ -1383,7 +1414,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1383 return &udev->se_dev; 1414 return &udev->se_dev;
1384} 1415}
1385 1416
1386static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) 1417static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
1387{ 1418{
1388 struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1419 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1389 LIST_HEAD(cmds); 1420 LIST_HEAD(cmds);
@@ -1391,15 +1422,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
1391 sense_reason_t scsi_ret; 1422 sense_reason_t scsi_ret;
1392 int ret; 1423 int ret;
1393 1424
1394 if (list_empty(&udev->cmdr_queue)) 1425 if (list_empty(&udev->qfull_queue))
1395 return true; 1426 return true;
1396 1427
1397 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); 1428 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1398 1429
1399 list_splice_init(&udev->cmdr_queue, &cmds); 1430 list_splice_init(&udev->qfull_queue, &cmds);
1400 1431
1401 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { 1432 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1402 list_del_init(&tcmu_cmd->cmdr_queue_entry); 1433 list_del_init(&tcmu_cmd->queue_entry);
1403 1434
1404 pr_debug("removing cmd %u on dev %s from queue\n", 1435 pr_debug("removing cmd %u on dev %s from queue\n",
1405 tcmu_cmd->cmd_id, udev->name); 1436 tcmu_cmd->cmd_id, udev->name);
@@ -1437,14 +1468,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
1437 * cmd was requeued, so just put all cmds back in 1468 * cmd was requeued, so just put all cmds back in
1438 * the queue 1469 * the queue
1439 */ 1470 */
1440 list_splice_tail(&cmds, &udev->cmdr_queue); 1471 list_splice_tail(&cmds, &udev->qfull_queue);
1441 drained = false; 1472 drained = false;
1442 goto done; 1473 break;
1443 } 1474 }
1444 } 1475 }
1445 if (list_empty(&udev->cmdr_queue)) 1476
1446 del_timer(&udev->qfull_timer); 1477 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1447done:
1448 return drained; 1478 return drained;
1449} 1479}
1450 1480
@@ -1454,7 +1484,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1454 1484
1455 mutex_lock(&udev->cmdr_lock); 1485 mutex_lock(&udev->cmdr_lock);
1456 tcmu_handle_completions(udev); 1486 tcmu_handle_completions(udev);
1457 run_cmdr_queue(udev, false); 1487 run_qfull_queue(udev, false);
1458 mutex_unlock(&udev->cmdr_lock); 1488 mutex_unlock(&udev->cmdr_lock);
1459 1489
1460 return 0; 1490 return 0;
@@ -1982,7 +2012,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
1982 /* complete IO that has executed successfully */ 2012 /* complete IO that has executed successfully */
1983 tcmu_handle_completions(udev); 2013 tcmu_handle_completions(udev);
1984 /* fail IO waiting to be queued */ 2014 /* fail IO waiting to be queued */
1985 run_cmdr_queue(udev, true); 2015 run_qfull_queue(udev, true);
1986 2016
1987unlock: 2017unlock:
1988 mutex_unlock(&udev->cmdr_lock); 2018 mutex_unlock(&udev->cmdr_lock);
@@ -1997,7 +2027,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
1997 mutex_lock(&udev->cmdr_lock); 2027 mutex_lock(&udev->cmdr_lock);
1998 2028
1999 idr_for_each_entry(&udev->commands, cmd, i) { 2029 idr_for_each_entry(&udev->commands, cmd, i) {
2000 if (!list_empty(&cmd->cmdr_queue_entry)) 2030 if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
2001 continue; 2031 continue;
2002 2032
2003 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", 2033 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
@@ -2006,6 +2036,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2006 2036
2007 idr_remove(&udev->commands, i); 2037 idr_remove(&udev->commands, i);
2008 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 2038 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2039 list_del_init(&cmd->queue_entry);
2009 if (err_level == 1) { 2040 if (err_level == 1) {
2010 /* 2041 /*
2011 * Userspace was not able to start the 2042 * Userspace was not able to start the
@@ -2666,6 +2697,10 @@ static void check_timedout_devices(void)
2666 2697
2667 mutex_lock(&udev->cmdr_lock); 2698 mutex_lock(&udev->cmdr_lock);
2668 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 2699 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
2700
2701 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
2702 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
2703
2669 mutex_unlock(&udev->cmdr_lock); 2704 mutex_unlock(&udev->cmdr_lock);
2670 2705
2671 spin_lock_bh(&timed_out_udevs_lock); 2706 spin_lock_bh(&timed_out_udevs_lock);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index dfd23245f778..6fff16113628 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -774,7 +774,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
774 774
775 cdev = __cpufreq_cooling_register(np, policy, capacitance); 775 cdev = __cpufreq_cooling_register(np, policy, capacitance);
776 if (IS_ERR(cdev)) { 776 if (IS_ERR(cdev)) {
777 pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", 777 pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
778 policy->cpu, PTR_ERR(cdev)); 778 policy->cpu, PTR_ERR(cdev));
779 cdev = NULL; 779 cdev = NULL;
780 } 780 }
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 0582bd12a239..0ca908d12750 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -4,7 +4,7 @@
4 4
5config INT340X_THERMAL 5config INT340X_THERMAL
6 tristate "ACPI INT340X thermal drivers" 6 tristate "ACPI INT340X thermal drivers"
7 depends on X86 && ACPI 7 depends on X86 && ACPI && PCI
8 select THERMAL_GOV_USER_SPACE 8 select THERMAL_GOV_USER_SPACE
9 select ACPI_THERMAL_REL 9 select ACPI_THERMAL_REL
10 select ACPI_FAN 10 select ACPI_FAN
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 284cf2c5a8fd..8e1cf4d789be 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
84 struct pci_dev *pci_dev; \ 84 struct pci_dev *pci_dev; \
85 struct platform_device *pdev; \ 85 struct platform_device *pdev; \
86 struct proc_thermal_device *proc_dev; \ 86 struct proc_thermal_device *proc_dev; \
87\ 87 \
88 if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
89 dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
90 return 0; \
91 } \
92 \
88 if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ 93 if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
89 pdev = to_platform_device(dev); \ 94 pdev = to_platform_device(dev); \
90 proc_dev = platform_get_drvdata(pdev); \ 95 proc_dev = platform_get_drvdata(pdev); \
@@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev,
298 *priv = proc_priv; 303 *priv = proc_priv;
299 304
300 ret = proc_thermal_read_ppcc(proc_priv); 305 ret = proc_thermal_read_ppcc(proc_priv);
301 if (!ret) {
302 ret = sysfs_create_group(&dev->kobj,
303 &power_limit_attribute_group);
304
305 }
306 if (ret) 306 if (ret)
307 return ret; 307 return ret;
308 308
@@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev,
316 316
317 proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); 317 proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
318 if (IS_ERR(proc_priv->int340x_zone)) { 318 if (IS_ERR(proc_priv->int340x_zone)) {
319 ret = PTR_ERR(proc_priv->int340x_zone); 319 return PTR_ERR(proc_priv->int340x_zone);
320 goto remove_group;
321 } else 320 } else
322 ret = 0; 321 ret = 0;
323 322
@@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev,
331 330
332remove_zone: 331remove_zone:
333 int340x_thermal_zone_remove(proc_priv->int340x_zone); 332 int340x_thermal_zone_remove(proc_priv->int340x_zone);
334remove_group:
335 sysfs_remove_group(&proc_priv->dev->kobj,
336 &power_limit_attribute_group);
337 333
338 return ret; 334 return ret;
339} 335}
@@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev)
364 platform_set_drvdata(pdev, proc_priv); 360 platform_set_drvdata(pdev, proc_priv);
365 proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; 361 proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
366 362
367 return 0; 363 dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
364
365 return sysfs_create_group(&pdev->dev.kobj,
366 &power_limit_attribute_group);
368} 367}
369 368
370static int int3401_remove(struct platform_device *pdev) 369static int int3401_remove(struct platform_device *pdev)
@@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
423 proc_priv->soc_dts = intel_soc_dts_iosf_init( 422 proc_priv->soc_dts = intel_soc_dts_iosf_init(
424 INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0); 423 INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
425 424
426 if (proc_priv->soc_dts && pdev->irq) { 425 if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
427 ret = pci_enable_msi(pdev); 426 ret = pci_enable_msi(pdev);
428 if (!ret) { 427 if (!ret) {
429 ret = request_threaded_irq(pdev->irq, NULL, 428 ret = request_threaded_irq(pdev->irq, NULL,
@@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
441 dev_err(&pdev->dev, "No auxiliary DTSs enabled\n"); 440 dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
442 } 441 }
443 442
444 return 0; 443 dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
444
445 return sysfs_create_group(&pdev->dev.kobj,
446 &power_limit_attribute_group);
445} 447}
446 448
447static void proc_thermal_pci_remove(struct pci_dev *pdev) 449static void proc_thermal_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4bfdb4a1e47d..2df059cc07e2 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -867,14 +867,14 @@ __init *thermal_of_build_thermal_zone(struct device_node *np)
867 867
868 ret = of_property_read_u32(np, "polling-delay-passive", &prop); 868 ret = of_property_read_u32(np, "polling-delay-passive", &prop);
869 if (ret < 0) { 869 if (ret < 0) {
870 pr_err("missing polling-delay-passive property\n"); 870 pr_err("%pOFn: missing polling-delay-passive property\n", np);
871 goto free_tz; 871 goto free_tz;
872 } 872 }
873 tz->passive_delay = prop; 873 tz->passive_delay = prop;
874 874
875 ret = of_property_read_u32(np, "polling-delay", &prop); 875 ret = of_property_read_u32(np, "polling-delay", &prop);
876 if (ret < 0) { 876 if (ret < 0) {
877 pr_err("missing polling-delay property\n"); 877 pr_err("%pOFn: missing polling-delay property\n", np);
878 goto free_tz; 878 goto free_tz;
879 } 879 }
880 tz->polling_delay = prop; 880 tz->polling_delay = prop;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 4164414d4c64..8bdf42bc8fc8 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
597 /* too large for caller's buffer */ 597 /* too large for caller's buffer */
598 ret = -EOVERFLOW; 598 ret = -EOVERFLOW;
599 } else { 599 } else {
600 __set_current_state(TASK_RUNNING);
600 if (copy_to_user(buf, rbuf->buf, rbuf->count)) 601 if (copy_to_user(buf, rbuf->buf, rbuf->count))
601 ret = -EFAULT; 602 ret = -EFAULT;
602 else 603 else
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 189ab1212d9a..e441221e04b9 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1070,15 +1070,16 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
1070 1070
1071 ret = 0; 1071 ret = 0;
1072 } 1072 }
1073 }
1074 1073
1075 /* Initialise interrupt backoff work if required */ 1074 /* Initialise interrupt backoff work if required */
1076 if (up->overrun_backoff_time_ms > 0) { 1075 if (up->overrun_backoff_time_ms > 0) {
1077 uart->overrun_backoff_time_ms = up->overrun_backoff_time_ms; 1076 uart->overrun_backoff_time_ms =
1078 INIT_DELAYED_WORK(&uart->overrun_backoff, 1077 up->overrun_backoff_time_ms;
1079 serial_8250_overrun_backoff_work); 1078 INIT_DELAYED_WORK(&uart->overrun_backoff,
1080 } else { 1079 serial_8250_overrun_backoff_work);
1081 uart->overrun_backoff_time_ms = 0; 1080 } else {
1081 uart->overrun_backoff_time_ms = 0;
1082 }
1082 } 1083 }
1083 1084
1084 mutex_unlock(&serial_mutex); 1085 mutex_unlock(&serial_mutex);
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index e2c407656fa6..c1fdbc0b6840 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -357,6 +357,9 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
357 if (dmacnt == 2) { 357 if (dmacnt == 2) {
358 data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), 358 data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma),
359 GFP_KERNEL); 359 GFP_KERNEL);
360 if (!data->dma)
361 return -ENOMEM;
362
360 data->dma->fn = mtk8250_dma_filter; 363 data->dma->fn = mtk8250_dma_filter;
361 data->dma->rx_size = MTK_UART_RX_SIZE; 364 data->dma->rx_size = MTK_UART_RX_SIZE;
362 data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; 365 data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index f80a300b5d68..48bd694a5fa1 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3420,6 +3420,11 @@ static int
3420serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) 3420serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
3421{ 3421{
3422 int num_iomem, num_port, first_port = -1, i; 3422 int num_iomem, num_port, first_port = -1, i;
3423 int rc;
3424
3425 rc = serial_pci_is_class_communication(dev);
3426 if (rc)
3427 return rc;
3423 3428
3424 /* 3429 /*
3425 * Should we try to make guesses for multiport serial devices later? 3430 * Should we try to make guesses for multiport serial devices later?
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
3647 3652
3648 board = &pci_boards[ent->driver_data]; 3653 board = &pci_boards[ent->driver_data];
3649 3654
3650 rc = serial_pci_is_class_communication(dev);
3651 if (rc)
3652 return rc;
3653
3654 rc = serial_pci_is_blacklisted(dev); 3655 rc = serial_pci_is_blacklisted(dev);
3655 if (rc) 3656 if (rc)
3656 return rc; 3657 return rc;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 67b9bf3b500e..089a6f285d5e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -85,6 +85,18 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
85 with "earlycon=smh" on the kernel command line. The console is 85 with "earlycon=smh" on the kernel command line. The console is
86 enabled when early_param is processed. 86 enabled when early_param is processed.
87 87
88config SERIAL_EARLYCON_RISCV_SBI
89 bool "Early console using RISC-V SBI"
90 depends on RISCV
91 select SERIAL_CORE
92 select SERIAL_CORE_CONSOLE
93 select SERIAL_EARLYCON
94 help
95 Support for early debug console using RISC-V SBI. This enables
96 the console before standard serial driver is probed. This is enabled
97 with "earlycon=sbi" on the kernel command line. The console is
98 enabled when early_param is processed.
99
88config SERIAL_SB1250_DUART 100config SERIAL_SB1250_DUART
89 tristate "BCM1xxx on-chip DUART serial support" 101 tristate "BCM1xxx on-chip DUART serial support"
90 depends on SIBYTE_SB1xxx_SOC=y 102 depends on SIBYTE_SB1xxx_SOC=y
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 8c303736b7e8..1511e8a9f856 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SERIAL_CORE) += serial_core.o
7 7
8obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o 8obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
9obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o 9obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o
10obj-$(CONFIG_SERIAL_EARLYCON_RISCV_SBI) += earlycon-riscv-sbi.o
10 11
11# These Sparc drivers have to appear before others such as 8250 12# These Sparc drivers have to appear before others such as 8250
12# which share ttySx minor node space. Otherwise console device 13# which share ttySx minor node space. Otherwise console device
diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c
new file mode 100644
index 000000000000..ce81523c3113
--- /dev/null
+++ b/drivers/tty/serial/earlycon-riscv-sbi.c
@@ -0,0 +1,31 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * RISC-V SBI based earlycon
4 *
5 * Copyright (C) 2018 Anup Patel <anup@brainfault.org>
6 */
7#include <linux/kernel.h>
8#include <linux/console.h>
9#include <linux/init.h>
10#include <linux/serial_core.h>
11#include <asm/sbi.h>
12
13static void sbi_putc(struct uart_port *port, int c)
14{
15 sbi_console_putchar(c);
16}
17
18static void sbi_console_write(struct console *con,
19 const char *s, unsigned n)
20{
21 struct earlycon_device *dev = con->data;
22 uart_console_write(&dev->port, s, n, sbi_putc);
23}
24
25static int __init early_sbi_setup(struct earlycon_device *device,
26 const char *opt)
27{
28 device->con->write = sbi_console_write;
29 return 0;
30}
31EARLYCON_DECLARE(sbi, early_sbi_setup);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 241a48e5052c..debdd1b9e01a 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1697,7 +1697,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
1697 } 1697 }
1698 1698
1699 /* ask the core to calculate the divisor */ 1699 /* ask the core to calculate the divisor */
1700 baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); 1700 baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
1701 1701
1702 spin_lock_irqsave(&sport->port.lock, flags); 1702 spin_lock_irqsave(&sport->port.lock, flags);
1703 1703
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index e052b69ceb98..9de9f0f239a1 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -114,9 +114,9 @@ struct ltq_uart_port {
114 114
115static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg) 115static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg)
116{ 116{
117 u32 tmp = readl(reg); 117 u32 tmp = __raw_readl(reg);
118 118
119 writel((tmp & ~clear) | set, reg); 119 __raw_writel((tmp & ~clear) | set, reg);
120} 120}
121 121
122static inline struct 122static inline struct
@@ -144,7 +144,7 @@ lqasc_start_tx(struct uart_port *port)
144static void 144static void
145lqasc_stop_rx(struct uart_port *port) 145lqasc_stop_rx(struct uart_port *port)
146{ 146{
147 writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); 147 __raw_writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
148} 148}
149 149
150static int 150static int
@@ -153,11 +153,12 @@ lqasc_rx_chars(struct uart_port *port)
153 struct tty_port *tport = &port->state->port; 153 struct tty_port *tport = &port->state->port;
154 unsigned int ch = 0, rsr = 0, fifocnt; 154 unsigned int ch = 0, rsr = 0, fifocnt;
155 155
156 fifocnt = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; 156 fifocnt = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
157 ASCFSTAT_RXFFLMASK;
157 while (fifocnt--) { 158 while (fifocnt--) {
158 u8 flag = TTY_NORMAL; 159 u8 flag = TTY_NORMAL;
159 ch = readb(port->membase + LTQ_ASC_RBUF); 160 ch = readb(port->membase + LTQ_ASC_RBUF);
160 rsr = (readl(port->membase + LTQ_ASC_STATE) 161 rsr = (__raw_readl(port->membase + LTQ_ASC_STATE)
161 & ASCSTATE_ANY) | UART_DUMMY_UER_RX; 162 & ASCSTATE_ANY) | UART_DUMMY_UER_RX;
162 tty_flip_buffer_push(tport); 163 tty_flip_buffer_push(tport);
163 port->icount.rx++; 164 port->icount.rx++;
@@ -217,7 +218,7 @@ lqasc_tx_chars(struct uart_port *port)
217 return; 218 return;
218 } 219 }
219 220
220 while (((readl(port->membase + LTQ_ASC_FSTAT) & 221 while (((__raw_readl(port->membase + LTQ_ASC_FSTAT) &
221 ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { 222 ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
222 if (port->x_char) { 223 if (port->x_char) {
223 writeb(port->x_char, port->membase + LTQ_ASC_TBUF); 224 writeb(port->x_char, port->membase + LTQ_ASC_TBUF);
@@ -245,7 +246,7 @@ lqasc_tx_int(int irq, void *_port)
245 unsigned long flags; 246 unsigned long flags;
246 struct uart_port *port = (struct uart_port *)_port; 247 struct uart_port *port = (struct uart_port *)_port;
247 spin_lock_irqsave(&ltq_asc_lock, flags); 248 spin_lock_irqsave(&ltq_asc_lock, flags);
248 writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); 249 __raw_writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
249 spin_unlock_irqrestore(&ltq_asc_lock, flags); 250 spin_unlock_irqrestore(&ltq_asc_lock, flags);
250 lqasc_start_tx(port); 251 lqasc_start_tx(port);
251 return IRQ_HANDLED; 252 return IRQ_HANDLED;
@@ -270,7 +271,7 @@ lqasc_rx_int(int irq, void *_port)
270 unsigned long flags; 271 unsigned long flags;
271 struct uart_port *port = (struct uart_port *)_port; 272 struct uart_port *port = (struct uart_port *)_port;
272 spin_lock_irqsave(&ltq_asc_lock, flags); 273 spin_lock_irqsave(&ltq_asc_lock, flags);
273 writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); 274 __raw_writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
274 lqasc_rx_chars(port); 275 lqasc_rx_chars(port);
275 spin_unlock_irqrestore(&ltq_asc_lock, flags); 276 spin_unlock_irqrestore(&ltq_asc_lock, flags);
276 return IRQ_HANDLED; 277 return IRQ_HANDLED;
@@ -280,7 +281,8 @@ static unsigned int
280lqasc_tx_empty(struct uart_port *port) 281lqasc_tx_empty(struct uart_port *port)
281{ 282{
282 int status; 283 int status;
283 status = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; 284 status = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
285 ASCFSTAT_TXFFLMASK;
284 return status ? 0 : TIOCSER_TEMT; 286 return status ? 0 : TIOCSER_TEMT;
285} 287}
286 288
@@ -313,12 +315,12 @@ lqasc_startup(struct uart_port *port)
313 asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), 315 asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
314 port->membase + LTQ_ASC_CLC); 316 port->membase + LTQ_ASC_CLC);
315 317
316 writel(0, port->membase + LTQ_ASC_PISEL); 318 __raw_writel(0, port->membase + LTQ_ASC_PISEL);
317 writel( 319 __raw_writel(
318 ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | 320 ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) |
319 ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, 321 ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU,
320 port->membase + LTQ_ASC_TXFCON); 322 port->membase + LTQ_ASC_TXFCON);
321 writel( 323 __raw_writel(
322 ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) 324 ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK)
323 | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, 325 | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU,
324 port->membase + LTQ_ASC_RXFCON); 326 port->membase + LTQ_ASC_RXFCON);
@@ -350,7 +352,7 @@ lqasc_startup(struct uart_port *port)
350 goto err2; 352 goto err2;
351 } 353 }
352 354
353 writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, 355 __raw_writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
354 port->membase + LTQ_ASC_IRNREN); 356 port->membase + LTQ_ASC_IRNREN);
355 return 0; 357 return 0;
356 358
@@ -369,7 +371,7 @@ lqasc_shutdown(struct uart_port *port)
369 free_irq(ltq_port->rx_irq, port); 371 free_irq(ltq_port->rx_irq, port);
370 free_irq(ltq_port->err_irq, port); 372 free_irq(ltq_port->err_irq, port);
371 373
372 writel(0, port->membase + LTQ_ASC_CON); 374 __raw_writel(0, port->membase + LTQ_ASC_CON);
373 asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, 375 asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU,
374 port->membase + LTQ_ASC_RXFCON); 376 port->membase + LTQ_ASC_RXFCON);
375 asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, 377 asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
@@ -461,13 +463,13 @@ lqasc_set_termios(struct uart_port *port,
461 asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); 463 asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON);
462 464
463 /* now we can write the new baudrate into the register */ 465 /* now we can write the new baudrate into the register */
464 writel(divisor, port->membase + LTQ_ASC_BG); 466 __raw_writel(divisor, port->membase + LTQ_ASC_BG);
465 467
466 /* turn the baudrate generator back on */ 468 /* turn the baudrate generator back on */
467 asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON); 469 asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON);
468 470
469 /* enable rx */ 471 /* enable rx */
470 writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); 472 __raw_writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
471 473
472 spin_unlock_irqrestore(&ltq_asc_lock, flags); 474 spin_unlock_irqrestore(&ltq_asc_lock, flags);
473 475
@@ -578,7 +580,7 @@ lqasc_console_putchar(struct uart_port *port, int ch)
578 return; 580 return;
579 581
580 do { 582 do {
581 fifofree = (readl(port->membase + LTQ_ASC_FSTAT) 583 fifofree = (__raw_readl(port->membase + LTQ_ASC_FSTAT)
582 & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; 584 & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
583 } while (fifofree == 0); 585 } while (fifofree == 0);
584 writeb(ch, port->membase + LTQ_ASC_TBUF); 586 writeb(ch, port->membase + LTQ_ASC_TBUF);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index a72d6d9fb983..38016609c7fa 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -225,7 +225,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
225 unsigned int mctrl = TIOCM_DSR | TIOCM_CAR; 225 unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
226 u32 geni_ios; 226 u32 geni_ios;
227 227
228 if (uart_console(uport) || !uart_cts_enabled(uport)) { 228 if (uart_console(uport)) {
229 mctrl |= TIOCM_CTS; 229 mctrl |= TIOCM_CTS;
230 } else { 230 } else {
231 geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS); 231 geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
@@ -241,7 +241,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
241{ 241{
242 u32 uart_manual_rfr = 0; 242 u32 uart_manual_rfr = 0;
243 243
244 if (uart_console(uport) || !uart_cts_enabled(uport)) 244 if (uart_console(uport))
245 return; 245 return;
246 246
247 if (!(mctrl & TIOCM_RTS)) 247 if (!(mctrl & TIOCM_RTS))
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d4cca5bdaf1c..556f50aa1b58 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty)
130 struct uart_port *port; 130 struct uart_port *port;
131 unsigned long flags; 131 unsigned long flags;
132 132
133 if (!state)
134 return;
135
133 port = uart_port_lock(state, flags); 136 port = uart_port_lock(state, flags);
134 __uart_start(tty); 137 __uart_start(tty);
135 uart_port_unlock(port, flags); 138 uart_port_unlock(port, flags);
@@ -550,10 +553,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
550 int ret = 0; 553 int ret = 0;
551 554
552 circ = &state->xmit; 555 circ = &state->xmit;
553 if (!circ->buf) 556 port = uart_port_lock(state, flags);
557 if (!circ->buf) {
558 uart_port_unlock(port, flags);
554 return 0; 559 return 0;
560 }
555 561
556 port = uart_port_lock(state, flags);
557 if (port && uart_circ_chars_free(circ) != 0) { 562 if (port && uart_circ_chars_free(circ) != 0) {
558 circ->buf[circ->head] = c; 563 circ->buf[circ->head] = c;
559 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); 564 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
@@ -586,11 +591,13 @@ static int uart_write(struct tty_struct *tty,
586 return -EL3HLT; 591 return -EL3HLT;
587 } 592 }
588 593
594 port = uart_port_lock(state, flags);
589 circ = &state->xmit; 595 circ = &state->xmit;
590 if (!circ->buf) 596 if (!circ->buf) {
597 uart_port_unlock(port, flags);
591 return 0; 598 return 0;
599 }
592 600
593 port = uart_port_lock(state, flags);
594 while (port) { 601 while (port) {
595 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); 602 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
596 if (count < c) 603 if (count < c)
@@ -723,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty)
723 upstat_t mask = UPSTAT_SYNC_FIFO; 730 upstat_t mask = UPSTAT_SYNC_FIFO;
724 struct uart_port *port; 731 struct uart_port *port;
725 732
733 if (!state)
734 return;
735
726 port = uart_port_ref(state); 736 port = uart_port_ref(state);
727 if (!port) 737 if (!port)
728 return; 738 return;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 8df0fd824520..64bbeb7d7e0c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1921,7 +1921,7 @@ out_nomem:
1921 1921
1922static void sci_free_irq(struct sci_port *port) 1922static void sci_free_irq(struct sci_port *port)
1923{ 1923{
1924 int i; 1924 int i, j;
1925 1925
1926 /* 1926 /*
1927 * Intentionally in reverse order so we iterate over the muxed 1927 * Intentionally in reverse order so we iterate over the muxed
@@ -1937,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port)
1937 if (unlikely(irq < 0)) 1937 if (unlikely(irq < 0))
1938 continue; 1938 continue;
1939 1939
1940 /* Check if already freed (irq was muxed) */
1941 for (j = 0; j < i; j++)
1942 if (port->irqs[j] == irq)
1943 j = i + 1;
1944 if (j > i)
1945 continue;
1946
1940 free_irq(port->irqs[i], port); 1947 free_irq(port->irqs[i], port);
1941 kfree(port->irqstr[i]); 1948 kfree(port->irqstr[i]);
1942 1949
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index bfe9ad85b362..21ffcce16927 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1256,7 +1256,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
1256static int tty_reopen(struct tty_struct *tty) 1256static int tty_reopen(struct tty_struct *tty)
1257{ 1257{
1258 struct tty_driver *driver = tty->driver; 1258 struct tty_driver *driver = tty->driver;
1259 int retval; 1259 struct tty_ldisc *ld;
1260 int retval = 0;
1260 1261
1261 if (driver->type == TTY_DRIVER_TYPE_PTY && 1262 if (driver->type == TTY_DRIVER_TYPE_PTY &&
1262 driver->subtype == PTY_TYPE_MASTER) 1263 driver->subtype == PTY_TYPE_MASTER)
@@ -1268,13 +1269,18 @@ static int tty_reopen(struct tty_struct *tty)
1268 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) 1269 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
1269 return -EBUSY; 1270 return -EBUSY;
1270 1271
1271 retval = tty_ldisc_lock(tty, 5 * HZ); 1272 ld = tty_ldisc_ref_wait(tty);
1272 if (retval) 1273 if (ld) {
1273 return retval; 1274 tty_ldisc_deref(ld);
1275 } else {
1276 retval = tty_ldisc_lock(tty, 5 * HZ);
1277 if (retval)
1278 return retval;
1274 1279
1275 if (!tty->ldisc) 1280 if (!tty->ldisc)
1276 retval = tty_ldisc_reinit(tty, tty->termios.c_line); 1281 retval = tty_ldisc_reinit(tty, tty->termios.c_line);
1277 tty_ldisc_unlock(tty); 1282 tty_ldisc_unlock(tty);
1283 }
1278 1284
1279 if (retval == 0) 1285 if (retval == 0)
1280 tty->count++; 1286 tty->count++;
@@ -2183,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
2183 ld = tty_ldisc_ref_wait(tty); 2189 ld = tty_ldisc_ref_wait(tty);
2184 if (!ld) 2190 if (!ld)
2185 return -EIO; 2191 return -EIO;
2186 ld->ops->receive_buf(tty, &ch, &mbz, 1); 2192 if (ld->ops->receive_buf)
2193 ld->ops->receive_buf(tty, &ch, &mbz, 1);
2187 tty_ldisc_deref(ld); 2194 tty_ldisc_deref(ld);
2188 return 0; 2195 return 0;
2189} 2196}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 41ec8e5010f3..bba75560d11e 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1272 if (con_is_visible(vc)) 1272 if (con_is_visible(vc))
1273 update_screen(vc); 1273 update_screen(vc);
1274 vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); 1274 vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
1275 notify_update(vc);
1275 return err; 1276 return err;
1276} 1277}
1277 1278
@@ -2764,8 +2765,8 @@ rescan_last_byte:
2764 con_flush(vc, draw_from, draw_to, &draw_x); 2765 con_flush(vc, draw_from, draw_to, &draw_x);
2765 vc_uniscr_debug_check(vc); 2766 vc_uniscr_debug_check(vc);
2766 console_conditional_schedule(); 2767 console_conditional_schedule();
2767 console_unlock();
2768 notify_update(vc); 2768 notify_update(vc);
2769 console_unlock();
2769 return n; 2770 return n;
2770} 2771}
2771 2772
@@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2884 unsigned char c; 2885 unsigned char c;
2885 static DEFINE_SPINLOCK(printing_lock); 2886 static DEFINE_SPINLOCK(printing_lock);
2886 const ushort *start; 2887 const ushort *start;
2887 ushort cnt = 0; 2888 ushort start_x, cnt;
2888 ushort myx;
2889 int kmsg_console; 2889 int kmsg_console;
2890 2890
2891 /* console busy or not yet initialized */ 2891 /* console busy or not yet initialized */
@@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2898 if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) 2898 if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
2899 vc = vc_cons[kmsg_console - 1].d; 2899 vc = vc_cons[kmsg_console - 1].d;
2900 2900
2901 /* read `x' only after setting currcons properly (otherwise
2902 the `x' macro will read the x of the foreground console). */
2903 myx = vc->vc_x;
2904
2905 if (!vc_cons_allocated(fg_console)) { 2901 if (!vc_cons_allocated(fg_console)) {
2906 /* impossible */ 2902 /* impossible */
2907 /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ 2903 /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
@@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2916 hide_cursor(vc); 2912 hide_cursor(vc);
2917 2913
2918 start = (ushort *)vc->vc_pos; 2914 start = (ushort *)vc->vc_pos;
2919 2915 start_x = vc->vc_x;
2920 /* Contrived structure to try to emulate original need_wrap behaviour 2916 cnt = 0;
2921 * Problems caused when we have need_wrap set on '\n' character */
2922 while (count--) { 2917 while (count--) {
2923 c = *b++; 2918 c = *b++;
2924 if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { 2919 if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
2925 if (cnt > 0) { 2920 if (cnt && con_is_visible(vc))
2926 if (con_is_visible(vc)) 2921 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2927 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); 2922 cnt = 0;
2928 vc->vc_x += cnt;
2929 if (vc->vc_need_wrap)
2930 vc->vc_x--;
2931 cnt = 0;
2932 }
2933 if (c == 8) { /* backspace */ 2923 if (c == 8) { /* backspace */
2934 bs(vc); 2924 bs(vc);
2935 start = (ushort *)vc->vc_pos; 2925 start = (ushort *)vc->vc_pos;
2936 myx = vc->vc_x; 2926 start_x = vc->vc_x;
2937 continue; 2927 continue;
2938 } 2928 }
2939 if (c != 13) 2929 if (c != 13)
2940 lf(vc); 2930 lf(vc);
2941 cr(vc); 2931 cr(vc);
2942 start = (ushort *)vc->vc_pos; 2932 start = (ushort *)vc->vc_pos;
2943 myx = vc->vc_x; 2933 start_x = vc->vc_x;
2944 if (c == 10 || c == 13) 2934 if (c == 10 || c == 13)
2945 continue; 2935 continue;
2946 } 2936 }
2937 vc_uniscr_putc(vc, c);
2947 scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); 2938 scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
2948 notify_write(vc, c); 2939 notify_write(vc, c);
2949 cnt++; 2940 cnt++;
2950 if (myx == vc->vc_cols - 1) { 2941 if (vc->vc_x == vc->vc_cols - 1) {
2951 vc->vc_need_wrap = 1;
2952 continue;
2953 }
2954 vc->vc_pos += 2;
2955 myx++;
2956 }
2957 if (cnt > 0) {
2958 if (con_is_visible(vc))
2959 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
2960 vc->vc_x += cnt;
2961 if (vc->vc_x == vc->vc_cols) {
2962 vc->vc_x--;
2963 vc->vc_need_wrap = 1; 2942 vc->vc_need_wrap = 1;
2943 } else {
2944 vc->vc_pos += 2;
2945 vc->vc_x++;
2964 } 2946 }
2965 } 2947 }
2948 if (cnt && con_is_visible(vc))
2949 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2966 set_cursor(vc); 2950 set_cursor(vc);
2967 notify_update(vc); 2951 notify_update(vc);
2968 2952
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index e81de9ca8729..9b45aa422e69 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -316,7 +316,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
316 if (IS_ERR(data->usbmisc_data)) 316 if (IS_ERR(data->usbmisc_data))
317 return PTR_ERR(data->usbmisc_data); 317 return PTR_ERR(data->usbmisc_data);
318 318
319 if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) { 319 if ((of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC)
320 && data->usbmisc_data) {
320 pdata.flags |= CI_HDRC_IMX_IS_HSIC; 321 pdata.flags |= CI_HDRC_IMX_IS_HSIC;
321 data->usbmisc_data->hsic = 1; 322 data->usbmisc_data->hsic = 1;
322 data->pinctrl = devm_pinctrl_get(dev); 323 data->pinctrl = devm_pinctrl_get(dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ed8c62b2d9d1..739f8960811a 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1865,6 +1865,13 @@ static const struct usb_device_id acm_ids[] = {
1865 .driver_info = IGNORE_DEVICE, 1865 .driver_info = IGNORE_DEVICE,
1866 }, 1866 },
1867 1867
1868 { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
1869 .driver_info = SEND_ZERO_PACKET,
1870 },
1871 { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
1872 .driver_info = SEND_ZERO_PACKET,
1873 },
1874
1868 /* control interfaces without any protocol set */ 1875 /* control interfaces without any protocol set */
1869 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1876 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1870 USB_CDC_PROTO_NONE) }, 1877 USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 356b05c82dbc..f713cecc1f41 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -143,9 +143,12 @@ int usb_choose_configuration(struct usb_device *udev)
143 continue; 143 continue;
144 } 144 }
145 145
146 if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) { 146 if (i > 0 && desc && is_audio(desc)) {
147 best = c; 147 if (is_uac3_config(desc)) {
148 break; 148 best = c;
149 break;
150 }
151 continue;
149 } 152 }
150 153
151 /* From the remaining configs, choose the first one whose 154 /* From the remaining configs, choose the first one whose
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index dc7f7fd71684..c12ac56606c3 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -119,11 +119,6 @@ static const struct attribute_group ports_group = {
119 .attrs = ports_attrs, 119 .attrs = ports_attrs,
120}; 120};
121 121
122static const struct attribute_group *ports_groups[] = {
123 &ports_group,
124 NULL
125};
126
127/*************************************** 122/***************************************
128 * Adding & removing ports 123 * Adding & removing ports
129 ***************************************/ 124 ***************************************/
@@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
307static int usbport_trig_activate(struct led_classdev *led_cdev) 302static int usbport_trig_activate(struct led_classdev *led_cdev)
308{ 303{
309 struct usbport_trig_data *usbport_data; 304 struct usbport_trig_data *usbport_data;
305 int err;
310 306
311 usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL); 307 usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
312 if (!usbport_data) 308 if (!usbport_data)
@@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
315 311
316 /* List of ports */ 312 /* List of ports */
317 INIT_LIST_HEAD(&usbport_data->ports); 313 INIT_LIST_HEAD(&usbport_data->ports);
314 err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
315 if (err)
316 goto err_free;
318 usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports); 317 usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
319 usbport_trig_update_count(usbport_data); 318 usbport_trig_update_count(usbport_data);
320 319
@@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
322 usbport_data->nb.notifier_call = usbport_trig_notify; 321 usbport_data->nb.notifier_call = usbport_trig_notify;
323 led_set_trigger_data(led_cdev, usbport_data); 322 led_set_trigger_data(led_cdev, usbport_data);
324 usb_register_notify(&usbport_data->nb); 323 usb_register_notify(&usbport_data->nb);
325
326 return 0; 324 return 0;
325
326err_free:
327 kfree(usbport_data);
328 return err;
327} 329}
328 330
329static void usbport_trig_deactivate(struct led_classdev *led_cdev) 331static void usbport_trig_deactivate(struct led_classdev *led_cdev)
@@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev)
335 usbport_trig_remove_port(usbport_data, port); 337 usbport_trig_remove_port(usbport_data, port);
336 } 338 }
337 339
340 sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
341
338 usb_unregister_notify(&usbport_data->nb); 342 usb_unregister_notify(&usbport_data->nb);
339 343
340 kfree(usbport_data); 344 kfree(usbport_data);
@@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = {
344 .name = "usbport", 348 .name = "usbport",
345 .activate = usbport_trig_activate, 349 .activate = usbport_trig_activate,
346 .deactivate = usbport_trig_deactivate, 350 .deactivate = usbport_trig_deactivate,
347 .groups = ports_groups,
348}; 351};
349 352
350static int __init usbport_trig_init(void) 353static int __init usbport_trig_init(void)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 514c5214ddb2..8bc35d53408b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -394,7 +394,8 @@ static const struct usb_device_id usb_quirk_list[] = {
394 { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET }, 394 { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
395 395
396 /* Corsair K70 RGB */ 396 /* Corsair K70 RGB */
397 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 397 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
398 USB_QUIRK_DELAY_CTRL_MSG },
398 399
399 /* Corsair Strafe */ 400 /* Corsair Strafe */
400 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | 401 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 68ad75a7460d..55ef3cc2701b 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -261,7 +261,7 @@ static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
261 261
262 if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) { 262 if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
263 dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__); 263 dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
264 dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT); 264 dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
265 dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG); 265 dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
266 } 266 }
267} 267}
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index cb7fcd7c0ad8..c1e9ea621f41 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
78 for (i = 0; i < exynos->num_clks; i++) { 78 for (i = 0; i < exynos->num_clks; i++) {
79 ret = clk_prepare_enable(exynos->clks[i]); 79 ret = clk_prepare_enable(exynos->clks[i]);
80 if (ret) { 80 if (ret) {
81 while (--i > 0) 81 while (i-- > 0)
82 clk_disable_unprepare(exynos->clks[i]); 82 clk_disable_unprepare(exynos->clks[i]);
83 return ret; 83 return ret;
84 } 84 }
@@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev)
223 for (i = 0; i < exynos->num_clks; i++) { 223 for (i = 0; i < exynos->num_clks; i++) {
224 ret = clk_prepare_enable(exynos->clks[i]); 224 ret = clk_prepare_enable(exynos->clks[i]);
225 if (ret) { 225 if (ret) {
226 while (--i > 0) 226 while (i-- > 0)
227 clk_disable_unprepare(exynos->clks[i]); 227 clk_disable_unprepare(exynos->clks[i]);
228 return ret; 228 return ret;
229 } 229 }
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 07bd31bb2f8a..6c9b76bcc2e1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -177,6 +177,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
177 req->started = false; 177 req->started = false;
178 list_del(&req->list); 178 list_del(&req->list);
179 req->remaining = 0; 179 req->remaining = 0;
180 req->needs_extra_trb = false;
180 181
181 if (req->request.status == -EINPROGRESS) 182 if (req->request.status == -EINPROGRESS)
182 req->request.status = status; 183 req->request.status = status;
@@ -1118,7 +1119,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
1118 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1119 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1119 unsigned int rem = length % maxp; 1120 unsigned int rem = length % maxp;
1120 1121
1121 if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { 1122 if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
1122 struct dwc3 *dwc = dep->dwc; 1123 struct dwc3 *dwc = dep->dwc;
1123 struct dwc3_trb *trb; 1124 struct dwc3_trb *trb;
1124 1125
@@ -1984,6 +1985,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
1984 1985
1985 /* begin to receive SETUP packets */ 1986 /* begin to receive SETUP packets */
1986 dwc->ep0state = EP0_SETUP_PHASE; 1987 dwc->ep0state = EP0_SETUP_PHASE;
1988 dwc->link_state = DWC3_LINK_STATE_SS_DIS;
1987 dwc3_ep0_out_start(dwc); 1989 dwc3_ep0_out_start(dwc);
1988 1990
1989 dwc3_gadget_enable_irq(dwc); 1991 dwc3_gadget_enable_irq(dwc);
@@ -3379,6 +3381,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
3379 dwc3_disconnect_gadget(dwc); 3381 dwc3_disconnect_gadget(dwc);
3380 __dwc3_gadget_stop(dwc); 3382 __dwc3_gadget_stop(dwc);
3381 3383
3384 synchronize_irq(dwc->irq_gadget);
3385
3382 return 0; 3386 return 0;
3383} 3387}
3384 3388
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 9cdef108fb1b..ed68a4860b7d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func(
838 838
839 ss = kzalloc(sizeof(*ss), GFP_KERNEL); 839 ss = kzalloc(sizeof(*ss), GFP_KERNEL);
840 if (!ss) 840 if (!ss)
841 return NULL; 841 return ERR_PTR(-ENOMEM);
842 842
843 ss_opts = container_of(fi, struct f_ss_opts, func_inst); 843 ss_opts = container_of(fi, struct f_ss_opts, func_inst);
844 844
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 01b44e159623..ccbd1d34eb2a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -172,8 +172,9 @@ static int scratchpad_setup(struct bdc *bdc)
172 /* Refer to BDC spec, Table 4 for description of SPB */ 172 /* Refer to BDC spec, Table 4 for description of SPB */
173 sp_buff_size = 1 << (sp_buff_size + 5); 173 sp_buff_size = 1 << (sp_buff_size + 5);
174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size); 174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
175 bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size, 175 bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
176 &bdc->scratchpad.sp_dma, GFP_KERNEL); 176 &bdc->scratchpad.sp_dma,
177 GFP_KERNEL);
177 178
178 if (!bdc->scratchpad.buff) 179 if (!bdc->scratchpad.buff)
179 goto fail; 180 goto fail;
@@ -202,11 +203,9 @@ static int setup_srr(struct bdc *bdc, int interrupter)
202 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST); 203 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
203 bdc->srr.dqp_index = 0; 204 bdc->srr.dqp_index = 0;
204 /* allocate the status report descriptors */ 205 /* allocate the status report descriptors */
205 bdc->srr.sr_bds = dma_zalloc_coherent( 206 bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
206 bdc->dev, 207 NUM_SR_ENTRIES * sizeof(struct bdc_bd),
207 NUM_SR_ENTRIES * sizeof(struct bdc_bd), 208 &bdc->srr.dma_addr, GFP_KERNEL);
208 &bdc->srr.dma_addr,
209 GFP_KERNEL);
210 if (!bdc->srr.sr_bds) 209 if (!bdc->srr.sr_bds)
211 return -ENOMEM; 210 return -ENOMEM;
212 211
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 660878a19505..b77f3126580e 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
2083#if defined(PLX_PCI_RDK2) 2083#if defined(PLX_PCI_RDK2)
2084 /* see if PCI int for us by checking irqstat */ 2084 /* see if PCI int for us by checking irqstat */
2085 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); 2085 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2086 if (!intcsr & (1 << NET2272_PCI_IRQ)) { 2086 if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2087 spin_unlock(&dev->lock); 2087 spin_unlock(&dev->lock);
2088 return IRQ_NONE; 2088 return IRQ_NONE;
2089 } 2089 }
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index f26109eafdbf..66ec1fdf9fe7 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
302MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>"); 302MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
303MODULE_ALIAS("mv-ehci"); 303MODULE_ALIAS("mv-ehci");
304MODULE_LICENSE("GPL"); 304MODULE_LICENSE("GPL");
305MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids);
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 6218bfe54f52..98deb5f64268 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -596,9 +596,9 @@ static int uhci_start(struct usb_hcd *hcd)
596 &uhci_debug_operations); 596 &uhci_debug_operations);
597#endif 597#endif
598 598
599 uhci->frame = dma_zalloc_coherent(uhci_dev(uhci), 599 uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
600 UHCI_NUMFRAMES * sizeof(*uhci->frame), 600 UHCI_NUMFRAMES * sizeof(*uhci->frame),
601 &uhci->frame_dma_handle, GFP_KERNEL); 601 &uhci->frame_dma_handle, GFP_KERNEL);
602 if (!uhci->frame) { 602 if (!uhci->frame) {
603 dev_err(uhci_dev(uhci), 603 dev_err(uhci_dev(uhci),
604 "unable to allocate consistent memory for frame list\n"); 604 "unable to allocate consistent memory for frame list\n");
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 36a3eb8849f1..8067f178fa84 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1672,8 +1672,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1672 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1672 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1673 for (i = 0; i < num_sp; i++) { 1673 for (i = 0; i < num_sp; i++) {
1674 dma_addr_t dma; 1674 dma_addr_t dma;
1675 void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, 1675 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1676 flags); 1676 flags);
1677 if (!buf) 1677 if (!buf)
1678 goto fail_sp4; 1678 goto fail_sp4;
1679 1679
@@ -1799,8 +1799,8 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
1799 struct xhci_erst_entry *entry; 1799 struct xhci_erst_entry *entry;
1800 1800
1801 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; 1801 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
1802 erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev, 1802 erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1803 size, &erst->erst_dma_addr, flags); 1803 size, &erst->erst_dma_addr, flags);
1804 if (!erst->entries) 1804 if (!erst->entries)
1805 return -ENOMEM; 1805 return -ENOMEM;
1806 1806
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index eae8b1b1b45b..ffe462a657b1 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum)
452 } 452 }
453 453
454 if (request) { 454 if (request) {
455 u8 is_dma = 0;
456 bool short_packet = false;
457 455
458 trace_musb_req_tx(req); 456 trace_musb_req_tx(req);
459 457
460 if (dma && (csr & MUSB_TXCSR_DMAENAB)) { 458 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
461 is_dma = 1;
462 csr |= MUSB_TXCSR_P_WZC_BITS; 459 csr |= MUSB_TXCSR_P_WZC_BITS;
463 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 460 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
464 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); 461 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
476 */ 473 */
477 if ((request->zero && request->length) 474 if ((request->zero && request->length)
478 && (request->length % musb_ep->packet_sz == 0) 475 && (request->length % musb_ep->packet_sz == 0)
479 && (request->actual == request->length)) 476 && (request->actual == request->length)) {
480 short_packet = true;
481 477
482 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
483 (is_dma && (!dma->desired_mode ||
484 (request->actual &
485 (musb_ep->packet_sz - 1)))))
486 short_packet = true;
487
488 if (short_packet) {
489 /* 478 /*
490 * On DMA completion, FIFO may not be 479 * On DMA completion, FIFO may not be
491 * available yet... 480 * available yet...
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a688f7f87829..5fc6825745f2 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
346 channel->status = MUSB_DMA_STATUS_FREE; 346 channel->status = MUSB_DMA_STATUS_FREE;
347 347
348 /* completed */ 348 /* completed */
349 if ((devctl & MUSB_DEVCTL_HM) 349 if (musb_channel->transmit &&
350 && (musb_channel->transmit) 350 (!channel->desired_mode ||
351 && ((channel->desired_mode == 0) 351 (channel->actual_len %
352 || (channel->actual_len & 352 musb_channel->max_packet_sz))) {
353 (musb_channel->max_packet_sz - 1)))
354 ) {
355 u8 epnum = musb_channel->epnum; 353 u8 epnum = musb_channel->epnum;
356 int offset = musb->io.ep_offset(epnum, 354 int offset = musb->io.ep_offset(epnum,
357 MUSB_TXCSR); 355 MUSB_TXCSR);
@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
363 */ 361 */
364 musb_ep_select(mbase, epnum); 362 musb_ep_select(mbase, epnum);
365 txcsr = musb_readw(mbase, offset); 363 txcsr = musb_readw(mbase, offset);
366 txcsr &= ~(MUSB_TXCSR_DMAENAB 364 if (channel->desired_mode == 1) {
365 txcsr &= ~(MUSB_TXCSR_DMAENAB
367 | MUSB_TXCSR_AUTOSET); 366 | MUSB_TXCSR_AUTOSET);
368 musb_writew(mbase, offset, txcsr); 367 musb_writew(mbase, offset, txcsr);
369 /* Send out the packet */ 368 /* Send out the packet */
370 txcsr &= ~MUSB_TXCSR_DMAMODE; 369 txcsr &= ~MUSB_TXCSR_DMAMODE;
370 txcsr |= MUSB_TXCSR_DMAENAB;
371 }
371 txcsr |= MUSB_TXCSR_TXPKTRDY; 372 txcsr |= MUSB_TXCSR_TXPKTRDY;
372 musb_writew(mbase, offset, txcsr); 373 musb_writew(mbase, offset, txcsr);
373 } 374 }
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index d7312eed6088..91ea3083e7ad 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,7 @@ config AB8500_USB
21 21
22config FSL_USB2_OTG 22config FSL_USB2_OTG
23 bool "Freescale USB OTG Transceiver Driver" 23 bool "Freescale USB OTG Transceiver Driver"
24 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM 24 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
25 depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' 25 depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
26 select USB_PHY 26 select USB_PHY
27 help 27 help
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 27bdb7222527..f5f0568d8533 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
61 if (ret) 61 if (ret)
62 return ret; 62 return ret;
63 63
64 ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
65 if (ret)
66 return ret;
67 am_phy->usb_phy_gen.phy.init = am335x_init; 64 am_phy->usb_phy_gen.phy.init = am335x_init;
68 am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; 65 am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
69 66
@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
82 device_set_wakeup_enable(dev, false); 79 device_set_wakeup_enable(dev, false);
83 phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); 80 phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
84 81
85 return 0; 82 return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
86} 83}
87 84
88static int am335x_phy_remove(struct platform_device *pdev) 85static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1ab2a6191013..77ef4c481f3c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
1783 int result; 1783 int result;
1784 u16 val; 1784 u16 val;
1785 1785
1786 result = usb_autopm_get_interface(serial->interface);
1787 if (result)
1788 return result;
1789
1786 val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value; 1790 val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
1787 result = usb_control_msg(serial->dev, 1791 result = usb_control_msg(serial->dev,
1788 usb_sndctrlpipe(serial->dev, 0), 1792 usb_sndctrlpipe(serial->dev, 0),
@@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
1795 val, result); 1799 val, result);
1796 } 1800 }
1797 1801
1802 usb_autopm_put_interface(serial->interface);
1803
1798 return result; 1804 return result;
1799} 1805}
1800 1806
@@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
1846 unsigned char *buf; 1852 unsigned char *buf;
1847 int result; 1853 int result;
1848 1854
1855 result = usb_autopm_get_interface(serial->interface);
1856 if (result)
1857 return result;
1858
1849 buf = kmalloc(1, GFP_KERNEL); 1859 buf = kmalloc(1, GFP_KERNEL);
1850 if (!buf) 1860 if (!buf) {
1861 usb_autopm_put_interface(serial->interface);
1851 return -ENOMEM; 1862 return -ENOMEM;
1863 }
1852 1864
1853 result = usb_control_msg(serial->dev, 1865 result = usb_control_msg(serial->dev,
1854 usb_rcvctrlpipe(serial->dev, 0), 1866 usb_rcvctrlpipe(serial->dev, 0),
@@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
1863 } 1875 }
1864 1876
1865 kfree(buf); 1877 kfree(buf);
1878 usb_autopm_put_interface(serial->interface);
1866 1879
1867 return result; 1880 return result;
1868} 1881}
diff --git a/drivers/usb/serial/keyspan_usa26msg.h b/drivers/usb/serial/keyspan_usa26msg.h
index 09e21e84fc4e..a68f1fb25b8a 100644
--- a/drivers/usb/serial/keyspan_usa26msg.h
+++ b/drivers/usb/serial/keyspan_usa26msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa26msg.h 3 usa26msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa28msg.h b/drivers/usb/serial/keyspan_usa28msg.h
index dee454c4609a..a19f3fe5d98d 100644
--- a/drivers/usb/serial/keyspan_usa28msg.h
+++ b/drivers/usb/serial/keyspan_usa28msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa28msg.h 3 usa28msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa49msg.h b/drivers/usb/serial/keyspan_usa49msg.h
index 163b2dea2ec5..8c3970fdd868 100644
--- a/drivers/usb/serial/keyspan_usa49msg.h
+++ b/drivers/usb/serial/keyspan_usa49msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa49msg.h 3 usa49msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa67msg.h b/drivers/usb/serial/keyspan_usa67msg.h
index 20fa3e2f7187..dcf502fdbb44 100644
--- a/drivers/usb/serial/keyspan_usa67msg.h
+++ b/drivers/usb/serial/keyspan_usa67msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa67msg.h 3 usa67msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa90msg.h b/drivers/usb/serial/keyspan_usa90msg.h
index 86708ecd8735..c4ca0f631d20 100644
--- a/drivers/usb/serial/keyspan_usa90msg.h
+++ b/drivers/usb/serial/keyspan_usa90msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa90msg.h 3 usa90msg.h
3 4
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 98e7a5df0f6d..bb3f9aa4a909 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
46 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, 46 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
47 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, 47 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
48 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, 48 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
49 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
49 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, 50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 51 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
51 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), 52 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 4e2554d55362..559941ca884d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -8,6 +8,7 @@
8 8
9#define PL2303_VENDOR_ID 0x067b 9#define PL2303_VENDOR_ID 0x067b
10#define PL2303_PRODUCT_ID 0x2303 10#define PL2303_PRODUCT_ID 0x2303
11#define PL2303_PRODUCT_ID_TB 0x2304
11#define PL2303_PRODUCT_ID_RSAQ2 0x04bb 12#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
12#define PL2303_PRODUCT_ID_DCU11 0x1234 13#define PL2303_PRODUCT_ID_DCU11 0x1234
13#define PL2303_PRODUCT_ID_PHAROS 0xaaa0 14#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
@@ -20,6 +21,7 @@
20#define PL2303_PRODUCT_ID_MOTOROLA 0x0307 21#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
21#define PL2303_PRODUCT_ID_ZTEK 0xe1f1 22#define PL2303_PRODUCT_ID_ZTEK 0xe1f1
22 23
24
23#define ATEN_VENDOR_ID 0x0557 25#define ATEN_VENDOR_ID 0x0557
24#define ATEN_VENDOR_ID2 0x0547 26#define ATEN_VENDOR_ID2 0x0547
25#define ATEN_PRODUCT_ID 0x2008 27#define ATEN_PRODUCT_ID 0x2008
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4d0273508043..edbbb13d6de6 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
85/* Motorola Tetra driver */ 85/* Motorola Tetra driver */
86#define MOTOROLA_TETRA_IDS() \ 86#define MOTOROLA_TETRA_IDS() \
87 { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ 87 { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
88 { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ 88 { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
89 { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
89DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); 90DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
90 91
91/* Novatel Wireless GPS driver */ 92/* Novatel Wireless GPS driver */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index fde2e71a6ade..a73ea495d5a7 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -235,8 +235,12 @@ static int slave_configure(struct scsi_device *sdev)
235 if (!(us->fflags & US_FL_NEEDS_CAP16)) 235 if (!(us->fflags & US_FL_NEEDS_CAP16))
236 sdev->try_rc_10_first = 1; 236 sdev->try_rc_10_first = 1;
237 237
238 /* assume SPC3 or latter devices support sense size > 18 */ 238 /*
239 if (sdev->scsi_level > SCSI_SPC_2) 239 * assume SPC3 or latter devices support sense size > 18
240 * unless US_FL_BAD_SENSE quirk is specified.
241 */
242 if (sdev->scsi_level > SCSI_SPC_2 &&
243 !(us->fflags & US_FL_BAD_SENSE))
240 us->fflags |= US_FL_SANE_SENSE; 244 us->fflags |= US_FL_SANE_SENSE;
241 245
242 /* 246 /*
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f7f83b21dc74..ea0d27a94afe 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1266,6 +1266,18 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
1266 US_FL_FIX_CAPACITY ), 1266 US_FL_FIX_CAPACITY ),
1267 1267
1268/* 1268/*
1269 * Reported by Icenowy Zheng <icenowy@aosc.io>
1270 * The SMI SM3350 USB-UFS bridge controller will enter a wrong state
1271 * that do not process read/write command if a long sense is requested,
1272 * so force to use 18-byte sense.
1273 */
1274UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff,
1275 "SMI",
1276 "SM3350 UFS-to-USB-Mass-Storage bridge",
1277 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1278 US_FL_BAD_SENSE ),
1279
1280/*
1269 * Reported by Paul Hartman <paul.hartman+linux@gmail.com> 1281 * Reported by Paul Hartman <paul.hartman+linux@gmail.com>
1270 * This card reader returns "Illegal Request, Logical Block Address 1282 * This card reader returns "Illegal Request, Logical Block Address
1271 * Out of Range" for the first READ(10) after a new card is inserted. 1283 * Out of Range" for the first READ(10) after a new card is inserted.
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 4bc29b586698..f1c39a3c7534 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -2297,7 +2297,8 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
2297 pdo_pps_apdo_max_voltage(snk)); 2297 pdo_pps_apdo_max_voltage(snk));
2298 port->pps_data.max_curr = min_pps_apdo_current(src, snk); 2298 port->pps_data.max_curr = min_pps_apdo_current(src, snk);
2299 port->pps_data.out_volt = min(port->pps_data.max_volt, 2299 port->pps_data.out_volt = min(port->pps_data.max_volt,
2300 port->pps_data.out_volt); 2300 max(port->pps_data.min_volt,
2301 port->pps_data.out_volt));
2301 port->pps_data.op_curr = min(port->pps_data.max_curr, 2302 port->pps_data.op_curr = min(port->pps_data.max_curr,
2302 port->pps_data.op_curr); 2303 port->pps_data.op_curr);
2303 } 2304 }
diff --git a/drivers/usb/usbip/README b/drivers/usb/usbip/README
deleted file mode 100644
index 41a2cf2e77a6..000000000000
--- a/drivers/usb/usbip/README
+++ /dev/null
@@ -1,7 +0,0 @@
1TODO:
2 - more discussion about the protocol
3 - testing
4 - review of the userspace interface
5 - document the protocol
6
7Please send patches for this code to Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h
index 228ccdb8d1c8..b2aa986ab9ed 100644
--- a/drivers/vfio/pci/trace.h
+++ b/drivers/vfio/pci/trace.h
@@ -1,13 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */ 1/* SPDX-License-Identifier: GPL-2.0-only */
2/* 2/*
3 * VFIO PCI mmap/mmap_fault tracepoints 3 * VFIO PCI mmap/mmap_fault tracepoints
4 * 4 *
5 * Copyright (C) 2018 IBM Corp. All rights reserved. 5 * Copyright (C) 2018 IBM Corp. All rights reserved.
6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru> 6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 7 */
12 8
13#undef TRACE_SYSTEM 9#undef TRACE_SYSTEM
@@ -94,7 +90,7 @@ TRACE_EVENT(vfio_pci_npu2_mmap,
94#endif /* _TRACE_VFIO_PCI_H */ 90#endif /* _TRACE_VFIO_PCI_H */
95 91
96#undef TRACE_INCLUDE_PATH 92#undef TRACE_INCLUDE_PATH
97#define TRACE_INCLUDE_PATH . 93#define TRACE_INCLUDE_PATH ../../drivers/vfio/pci
98#undef TRACE_INCLUDE_FILE 94#undef TRACE_INCLUDE_FILE
99#define TRACE_INCLUDE_FILE trace 95#define TRACE_INCLUDE_FILE trace
100 96
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index 054a2cf9dd8e..32f695ffe128 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -1,14 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2. 3 * VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2.
4 * 4 *
5 * Copyright (C) 2018 IBM Corp. All rights reserved. 5 * Copyright (C) 2018 IBM Corp. All rights reserved.
6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru> 6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Register an on-GPU RAM region for cacheable access. 8 * Register an on-GPU RAM region for cacheable access.
13 * 9 *
14 * Derived from original vfio_pci_igd.c: 10 * Derived from original vfio_pci_igd.c:
@@ -178,11 +174,11 @@ static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
178 struct vfio_pci_region *region, struct vfio_info_cap *caps) 174 struct vfio_pci_region *region, struct vfio_info_cap *caps)
179{ 175{
180 struct vfio_pci_nvgpu_data *data = region->data; 176 struct vfio_pci_nvgpu_data *data = region->data;
181 struct vfio_region_info_cap_nvlink2_ssatgt cap = { 0 }; 177 struct vfio_region_info_cap_nvlink2_ssatgt cap = {
182 178 .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
183 cap.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT; 179 .header.version = 1,
184 cap.header.version = 1; 180 .tgt = data->gpu_tgt
185 cap.tgt = data->gpu_tgt; 181 };
186 182
187 return vfio_info_add_capability(caps, &cap.header, sizeof(cap)); 183 return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
188} 184}
@@ -365,18 +361,18 @@ static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
365 struct vfio_pci_region *region, struct vfio_info_cap *caps) 361 struct vfio_pci_region *region, struct vfio_info_cap *caps)
366{ 362{
367 struct vfio_pci_npu2_data *data = region->data; 363 struct vfio_pci_npu2_data *data = region->data;
368 struct vfio_region_info_cap_nvlink2_ssatgt captgt = { 0 }; 364 struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
369 struct vfio_region_info_cap_nvlink2_lnkspd capspd = { 0 }; 365 .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
366 .header.version = 1,
367 .tgt = data->gpu_tgt
368 };
369 struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
370 .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
371 .header.version = 1,
372 .link_speed = data->link_speed
373 };
370 int ret; 374 int ret;
371 375
372 captgt.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT;
373 captgt.header.version = 1;
374 captgt.tgt = data->gpu_tgt;
375
376 capspd.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD;
377 capspd.header.version = 1;
378 capspd.link_speed = data->link_speed;
379
380 ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt)); 376 ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
381 if (ret) 377 if (ret)
382 return ret; 378 return ret;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 7651cfb14836..73652e21efec 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -878,7 +878,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
878 return -EINVAL; 878 return -EINVAL;
879 if (!unmap->size || unmap->size & mask) 879 if (!unmap->size || unmap->size & mask)
880 return -EINVAL; 880 return -EINVAL;
881 if (unmap->iova + unmap->size < unmap->iova || 881 if (unmap->iova + unmap->size - 1 < unmap->iova ||
882 unmap->size > SIZE_MAX) 882 unmap->size > SIZE_MAX)
883 return -EINVAL; 883 return -EINVAL;
884 884
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 36f3d0f49e60..df51a35cf537 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1236,7 +1236,8 @@ static void handle_rx(struct vhost_net *net)
1236 if (nvq->done_idx > VHOST_NET_BATCH) 1236 if (nvq->done_idx > VHOST_NET_BATCH)
1237 vhost_net_signal_used(nvq); 1237 vhost_net_signal_used(nvq);
1238 if (unlikely(vq_log)) 1238 if (unlikely(vq_log))
1239 vhost_log_write(vq, vq_log, log, vhost_len); 1239 vhost_log_write(vq, vq_log, log, vhost_len,
1240 vq->iov, in);
1240 total_len += vhost_len; 1241 total_len += vhost_len;
1241 if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { 1242 if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
1242 vhost_poll_queue(&vq->poll); 1243 vhost_poll_queue(&vq->poll);
@@ -1336,7 +1337,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
1336 n->vqs[i].rx_ring = NULL; 1337 n->vqs[i].rx_ring = NULL;
1337 vhost_net_buf_init(&n->vqs[i].rxq); 1338 vhost_net_buf_init(&n->vqs[i].rxq);
1338 } 1339 }
1339 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); 1340 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
1341 UIO_MAXIOV + VHOST_NET_BATCH);
1340 1342
1341 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); 1343 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
1342 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); 1344 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8e10ab436d1f..23593cb23dd0 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
1127 struct vhost_virtqueue *vq, 1127 struct vhost_virtqueue *vq,
1128 struct vhost_scsi_ctx *vc) 1128 struct vhost_scsi_ctx *vc)
1129{ 1129{
1130 struct virtio_scsi_ctrl_tmf_resp __user *resp;
1131 struct virtio_scsi_ctrl_tmf_resp rsp; 1130 struct virtio_scsi_ctrl_tmf_resp rsp;
1131 struct iov_iter iov_iter;
1132 int ret; 1132 int ret;
1133 1133
1134 pr_debug("%s\n", __func__); 1134 pr_debug("%s\n", __func__);
1135 memset(&rsp, 0, sizeof(rsp)); 1135 memset(&rsp, 0, sizeof(rsp));
1136 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; 1136 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1137 resp = vq->iov[vc->out].iov_base; 1137
1138 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1138 iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1139 if (!ret) 1139
1140 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1141 if (likely(ret == sizeof(rsp)))
1140 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 1142 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1141 else 1143 else
1142 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); 1144 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
@@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1147 struct vhost_virtqueue *vq, 1149 struct vhost_virtqueue *vq,
1148 struct vhost_scsi_ctx *vc) 1150 struct vhost_scsi_ctx *vc)
1149{ 1151{
1150 struct virtio_scsi_ctrl_an_resp __user *resp;
1151 struct virtio_scsi_ctrl_an_resp rsp; 1152 struct virtio_scsi_ctrl_an_resp rsp;
1153 struct iov_iter iov_iter;
1152 int ret; 1154 int ret;
1153 1155
1154 pr_debug("%s\n", __func__); 1156 pr_debug("%s\n", __func__);
1155 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ 1157 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
1156 rsp.response = VIRTIO_SCSI_S_OK; 1158 rsp.response = VIRTIO_SCSI_S_OK;
1157 resp = vq->iov[vc->out].iov_base; 1159
1158 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1160 iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1159 if (!ret) 1161
1162 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1163 if (likely(ret == sizeof(rsp)))
1160 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 1164 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1161 else 1165 else
1162 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); 1166 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
@@ -1623,7 +1627,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1623 vqs[i] = &vs->vqs[i].vq; 1627 vqs[i] = &vs->vqs[i].vq;
1624 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; 1628 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1625 } 1629 }
1626 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); 1630 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
1627 1631
1628 vhost_scsi_init_inflight(vs, NULL); 1632 vhost_scsi_init_inflight(vs, NULL);
1629 1633
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9f7942cbcbb2..a2e5dc7716e2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
390 vq->indirect = kmalloc_array(UIO_MAXIOV, 390 vq->indirect = kmalloc_array(UIO_MAXIOV,
391 sizeof(*vq->indirect), 391 sizeof(*vq->indirect),
392 GFP_KERNEL); 392 GFP_KERNEL);
393 vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log), 393 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
394 GFP_KERNEL); 394 GFP_KERNEL);
395 vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads), 395 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
396 GFP_KERNEL); 396 GFP_KERNEL);
397 if (!vq->indirect || !vq->log || !vq->heads) 397 if (!vq->indirect || !vq->log || !vq->heads)
398 goto err_nomem; 398 goto err_nomem;
@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
414} 414}
415 415
416void vhost_dev_init(struct vhost_dev *dev, 416void vhost_dev_init(struct vhost_dev *dev,
417 struct vhost_virtqueue **vqs, int nvqs) 417 struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
418{ 418{
419 struct vhost_virtqueue *vq; 419 struct vhost_virtqueue *vq;
420 int i; 420 int i;
@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
427 dev->iotlb = NULL; 427 dev->iotlb = NULL;
428 dev->mm = NULL; 428 dev->mm = NULL;
429 dev->worker = NULL; 429 dev->worker = NULL;
430 dev->iov_limit = iov_limit;
430 init_llist_head(&dev->work_list); 431 init_llist_head(&dev->work_list);
431 init_waitqueue_head(&dev->wait); 432 init_waitqueue_head(&dev->wait);
432 INIT_LIST_HEAD(&dev->read_list); 433 INIT_LIST_HEAD(&dev->read_list);
@@ -1034,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1034 int type, ret; 1035 int type, ret;
1035 1036
1036 ret = copy_from_iter(&type, sizeof(type), from); 1037 ret = copy_from_iter(&type, sizeof(type), from);
1037 if (ret != sizeof(type)) 1038 if (ret != sizeof(type)) {
1039 ret = -EINVAL;
1038 goto done; 1040 goto done;
1041 }
1039 1042
1040 switch (type) { 1043 switch (type) {
1041 case VHOST_IOTLB_MSG: 1044 case VHOST_IOTLB_MSG:
@@ -1054,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1054 1057
1055 iov_iter_advance(from, offset); 1058 iov_iter_advance(from, offset);
1056 ret = copy_from_iter(&msg, sizeof(msg), from); 1059 ret = copy_from_iter(&msg, sizeof(msg), from);
1057 if (ret != sizeof(msg)) 1060 if (ret != sizeof(msg)) {
1061 ret = -EINVAL;
1058 goto done; 1062 goto done;
1063 }
1059 if (vhost_process_iotlb_msg(dev, &msg)) { 1064 if (vhost_process_iotlb_msg(dev, &msg)) {
1060 ret = -EFAULT; 1065 ret = -EFAULT;
1061 goto done; 1066 goto done;
@@ -1733,13 +1738,87 @@ static int log_write(void __user *log_base,
1733 return r; 1738 return r;
1734} 1739}
1735 1740
1741static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1742{
1743 struct vhost_umem *umem = vq->umem;
1744 struct vhost_umem_node *u;
1745 u64 start, end, l, min;
1746 int r;
1747 bool hit = false;
1748
1749 while (len) {
1750 min = len;
1751 /* More than one GPAs can be mapped into a single HVA. So
1752 * iterate all possible umems here to be safe.
1753 */
1754 list_for_each_entry(u, &umem->umem_list, link) {
1755 if (u->userspace_addr > hva - 1 + len ||
1756 u->userspace_addr - 1 + u->size < hva)
1757 continue;
1758 start = max(u->userspace_addr, hva);
1759 end = min(u->userspace_addr - 1 + u->size,
1760 hva - 1 + len);
1761 l = end - start + 1;
1762 r = log_write(vq->log_base,
1763 u->start + start - u->userspace_addr,
1764 l);
1765 if (r < 0)
1766 return r;
1767 hit = true;
1768 min = min(l, min);
1769 }
1770
1771 if (!hit)
1772 return -EFAULT;
1773
1774 len -= min;
1775 hva += min;
1776 }
1777
1778 return 0;
1779}
1780
1781static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1782{
1783 struct iovec iov[64];
1784 int i, ret;
1785
1786 if (!vq->iotlb)
1787 return log_write(vq->log_base, vq->log_addr + used_offset, len);
1788
1789 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1790 len, iov, 64, VHOST_ACCESS_WO);
1791 if (ret < 0)
1792 return ret;
1793
1794 for (i = 0; i < ret; i++) {
1795 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1796 iov[i].iov_len);
1797 if (ret)
1798 return ret;
1799 }
1800
1801 return 0;
1802}
1803
1736int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 1804int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1737 unsigned int log_num, u64 len) 1805 unsigned int log_num, u64 len, struct iovec *iov, int count)
1738{ 1806{
1739 int i, r; 1807 int i, r;
1740 1808
1741 /* Make sure data written is seen before log. */ 1809 /* Make sure data written is seen before log. */
1742 smp_wmb(); 1810 smp_wmb();
1811
1812 if (vq->iotlb) {
1813 for (i = 0; i < count; i++) {
1814 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1815 iov[i].iov_len);
1816 if (r < 0)
1817 return r;
1818 }
1819 return 0;
1820 }
1821
1743 for (i = 0; i < log_num; ++i) { 1822 for (i = 0; i < log_num; ++i) {
1744 u64 l = min(log[i].len, len); 1823 u64 l = min(log[i].len, len);
1745 r = log_write(vq->log_base, log[i].addr, l); 1824 r = log_write(vq->log_base, log[i].addr, l);
@@ -1769,9 +1848,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1769 smp_wmb(); 1848 smp_wmb();
1770 /* Log used flag write. */ 1849 /* Log used flag write. */
1771 used = &vq->used->flags; 1850 used = &vq->used->flags;
1772 log_write(vq->log_base, vq->log_addr + 1851 log_used(vq, (used - (void __user *)vq->used),
1773 (used - (void __user *)vq->used), 1852 sizeof vq->used->flags);
1774 sizeof vq->used->flags);
1775 if (vq->log_ctx) 1853 if (vq->log_ctx)
1776 eventfd_signal(vq->log_ctx, 1); 1854 eventfd_signal(vq->log_ctx, 1);
1777 } 1855 }
@@ -1789,9 +1867,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1789 smp_wmb(); 1867 smp_wmb();
1790 /* Log avail event write */ 1868 /* Log avail event write */
1791 used = vhost_avail_event(vq); 1869 used = vhost_avail_event(vq);
1792 log_write(vq->log_base, vq->log_addr + 1870 log_used(vq, (used - (void __user *)vq->used),
1793 (used - (void __user *)vq->used), 1871 sizeof *vhost_avail_event(vq));
1794 sizeof *vhost_avail_event(vq));
1795 if (vq->log_ctx) 1872 if (vq->log_ctx)
1796 eventfd_signal(vq->log_ctx, 1); 1873 eventfd_signal(vq->log_ctx, 1);
1797 } 1874 }
@@ -2191,10 +2268,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2191 /* Make sure data is seen before log. */ 2268 /* Make sure data is seen before log. */
2192 smp_wmb(); 2269 smp_wmb();
2193 /* Log used ring entry write. */ 2270 /* Log used ring entry write. */
2194 log_write(vq->log_base, 2271 log_used(vq, ((void __user *)used - (void __user *)vq->used),
2195 vq->log_addr + 2272 count * sizeof *used);
2196 ((void __user *)used - (void __user *)vq->used),
2197 count * sizeof *used);
2198 } 2273 }
2199 old = vq->last_used_idx; 2274 old = vq->last_used_idx;
2200 new = (vq->last_used_idx += count); 2275 new = (vq->last_used_idx += count);
@@ -2236,9 +2311,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2236 /* Make sure used idx is seen before log. */ 2311 /* Make sure used idx is seen before log. */
2237 smp_wmb(); 2312 smp_wmb();
2238 /* Log used index update. */ 2313 /* Log used index update. */
2239 log_write(vq->log_base, 2314 log_used(vq, offsetof(struct vring_used, idx),
2240 vq->log_addr + offsetof(struct vring_used, idx), 2315 sizeof vq->used->idx);
2241 sizeof vq->used->idx);
2242 if (vq->log_ctx) 2316 if (vq->log_ctx)
2243 eventfd_signal(vq->log_ctx, 1); 2317 eventfd_signal(vq->log_ctx, 1);
2244 } 2318 }
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 466ef7542291..9490e7ddb340 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -170,9 +170,11 @@ struct vhost_dev {
170 struct list_head read_list; 170 struct list_head read_list;
171 struct list_head pending_list; 171 struct list_head pending_list;
172 wait_queue_head_t wait; 172 wait_queue_head_t wait;
173 int iov_limit;
173}; 174};
174 175
175void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); 176void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
177 int nvqs, int iov_limit);
176long vhost_dev_set_owner(struct vhost_dev *dev); 178long vhost_dev_set_owner(struct vhost_dev *dev);
177bool vhost_dev_has_owner(struct vhost_dev *dev); 179bool vhost_dev_has_owner(struct vhost_dev *dev);
178long vhost_dev_check_owner(struct vhost_dev *); 180long vhost_dev_check_owner(struct vhost_dev *);
@@ -205,7 +207,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
205bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); 207bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
206 208
207int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 209int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
208 unsigned int log_num, u64 len); 210 unsigned int log_num, u64 len,
211 struct iovec *iov, int count);
209int vq_iotlb_prefetch(struct vhost_virtqueue *vq); 212int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
210 213
211struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); 214struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bc42d38ae031..bb5fc0e9fbc2 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
531 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; 531 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
532 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; 532 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
533 533
534 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs)); 534 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
535 535
536 file->private_data = vsock; 536 file->private_data = vsock;
537 spin_lock_init(&vsock->send_pkt_list_lock); 537 spin_lock_init(&vsock->send_pkt_list_lock);
@@ -642,7 +642,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
642 hash_del_rcu(&vsock->hash); 642 hash_del_rcu(&vsock->hash);
643 643
644 vsock->guest_cid = guest_cid; 644 vsock->guest_cid = guest_cid;
645 hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid); 645 hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
646 mutex_unlock(&vhost_vsock_mutex); 646 mutex_unlock(&vhost_vsock_mutex);
647 647
648 return 0; 648 return 0;
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 6d8dc2c77520..51e0c4be08df 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -174,7 +174,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
174 return -ENODEV; 174 return -ENODEV;
175 } 175 }
176 for_each_child_of_node(nproot, np) { 176 for_each_child_of_node(nproot, np) {
177 if (!of_node_cmp(np->name, name)) { 177 if (of_node_name_eq(np, name)) {
178 of_property_read_u32(np, "marvell,88pm860x-iset", 178 of_property_read_u32(np, "marvell,88pm860x-iset",
179 &iset); 179 &iset);
180 data->iset = PM8606_WLED_CURRENT(iset); 180 data->iset = PM8606_WLED_CURRENT(iset);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index f9ef0673a083..feb90764a811 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -30,6 +30,7 @@ struct pwm_bl_data {
30 struct device *dev; 30 struct device *dev;
31 unsigned int lth_brightness; 31 unsigned int lth_brightness;
32 unsigned int *levels; 32 unsigned int *levels;
33 bool enabled;
33 struct regulator *power_supply; 34 struct regulator *power_supply;
34 struct gpio_desc *enable_gpio; 35 struct gpio_desc *enable_gpio;
35 unsigned int scale; 36 unsigned int scale;
@@ -50,7 +51,7 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
50 int err; 51 int err;
51 52
52 pwm_get_state(pb->pwm, &state); 53 pwm_get_state(pb->pwm, &state);
53 if (state.enabled) 54 if (pb->enabled)
54 return; 55 return;
55 56
56 err = regulator_enable(pb->power_supply); 57 err = regulator_enable(pb->power_supply);
@@ -65,6 +66,8 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
65 66
66 if (pb->enable_gpio) 67 if (pb->enable_gpio)
67 gpiod_set_value_cansleep(pb->enable_gpio, 1); 68 gpiod_set_value_cansleep(pb->enable_gpio, 1);
69
70 pb->enabled = true;
68} 71}
69 72
70static void pwm_backlight_power_off(struct pwm_bl_data *pb) 73static void pwm_backlight_power_off(struct pwm_bl_data *pb)
@@ -72,7 +75,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
72 struct pwm_state state; 75 struct pwm_state state;
73 76
74 pwm_get_state(pb->pwm, &state); 77 pwm_get_state(pb->pwm, &state);
75 if (!state.enabled) 78 if (!pb->enabled)
76 return; 79 return;
77 80
78 if (pb->enable_gpio) 81 if (pb->enable_gpio)
@@ -86,6 +89,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
86 pwm_apply_state(pb->pwm, &state); 89 pwm_apply_state(pb->pwm, &state);
87 90
88 regulator_disable(pb->power_supply); 91 regulator_disable(pb->power_supply);
92 pb->enabled = false;
89} 93}
90 94
91static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) 95static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
@@ -269,6 +273,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
269 memset(data, 0, sizeof(*data)); 273 memset(data, 0, sizeof(*data));
270 274
271 /* 275 /*
276 * These values are optional and set as 0 by default, the out values
277 * are modified only if a valid u32 value can be decoded.
278 */
279 of_property_read_u32(node, "post-pwm-on-delay-ms",
280 &data->post_pwm_on_delay);
281 of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
282
283 data->enable_gpio = -EINVAL;
284
285 /*
272 * Determine the number of brightness levels, if this property is not 286 * Determine the number of brightness levels, if this property is not
273 * set a default table of brightness levels will be used. 287 * set a default table of brightness levels will be used.
274 */ 288 */
@@ -380,15 +394,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
380 data->max_brightness--; 394 data->max_brightness--;
381 } 395 }
382 396
383 /*
384 * These values are optional and set as 0 by default, the out values
385 * are modified only if a valid u32 value can be decoded.
386 */
387 of_property_read_u32(node, "post-pwm-on-delay-ms",
388 &data->post_pwm_on_delay);
389 of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
390
391 data->enable_gpio = -EINVAL;
392 return 0; 397 return 0;
393} 398}
394 399
@@ -483,6 +488,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
483 pb->check_fb = data->check_fb; 488 pb->check_fb = data->check_fb;
484 pb->exit = data->exit; 489 pb->exit = data->exit;
485 pb->dev = &pdev->dev; 490 pb->dev = &pdev->dev;
491 pb->enabled = false;
486 pb->post_pwm_on_delay = data->post_pwm_on_delay; 492 pb->post_pwm_on_delay = data->post_pwm_on_delay;
487 pb->pwm_off_delay = data->pwm_off_delay; 493 pb->pwm_off_delay = data->pwm_off_delay;
488 494
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 09731b2f6815..c6b3bdbbdbc9 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
271 271
272static void vgacon_restore_screen(struct vc_data *c) 272static void vgacon_restore_screen(struct vc_data *c)
273{ 273{
274 c->vc_origin = c->vc_visible_origin;
274 vgacon_scrollback_cur->save = 0; 275 vgacon_scrollback_cur->save = 0;
275 276
276 if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { 277 if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
@@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
287 int start, end, count, soff; 288 int start, end, count, soff;
288 289
289 if (!lines) { 290 if (!lines) {
290 c->vc_visible_origin = c->vc_origin; 291 vgacon_restore_screen(c);
291 vga_set_mem_top(c);
292 return; 292 return;
293 } 293 }
294 294
@@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
298 if (!vgacon_scrollback_cur->save) { 298 if (!vgacon_scrollback_cur->save) {
299 vgacon_cursor(c, CM_ERASE); 299 vgacon_cursor(c, CM_ERASE);
300 vgacon_save_screen(c); 300 vgacon_save_screen(c);
301 c->vc_origin = (unsigned long)c->vc_screenbuf;
301 vgacon_scrollback_cur->save = 1; 302 vgacon_scrollback_cur->save = 1;
302 } 303 }
303 304
@@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
335 int copysize; 336 int copysize;
336 337
337 int diff = c->vc_rows - count; 338 int diff = c->vc_rows - count;
338 void *d = (void *) c->vc_origin; 339 void *d = (void *) c->vc_visible_origin;
339 void *s = (void *) c->vc_screenbuf; 340 void *s = (void *) c->vc_screenbuf;
340 341
341 count *= c->vc_size_row; 342 count *= c->vc_size_row;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 8976190b6c1f..bfa1360ec750 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt)
510 continue; 510 continue;
511 } 511 }
512#endif 512#endif
513
514 if (!strncmp(options, "logo-pos:", 9)) {
515 options += 9;
516 if (!strcmp(options, "center"))
517 fb_center_logo = true;
518 continue;
519 }
513 } 520 }
514 return 1; 521 return 1;
515} 522}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 558ed2ed3124..cb43a2258c51 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb);
53int num_registered_fb __read_mostly; 53int num_registered_fb __read_mostly;
54EXPORT_SYMBOL(num_registered_fb); 54EXPORT_SYMBOL(num_registered_fb);
55 55
56bool fb_center_logo __read_mostly;
57EXPORT_SYMBOL(fb_center_logo);
58
56static struct fb_info *get_fb_info(unsigned int idx) 59static struct fb_info *get_fb_info(unsigned int idx)
57{ 60{
58 struct fb_info *fb_info; 61 struct fb_info *fb_info;
@@ -506,8 +509,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
506 fb_set_logo(info, logo, logo_new, fb_logo.depth); 509 fb_set_logo(info, logo, logo_new, fb_logo.depth);
507 } 510 }
508 511
509#ifdef CONFIG_FB_LOGO_CENTER 512 if (fb_center_logo) {
510 {
511 int xres = info->var.xres; 513 int xres = info->var.xres;
512 int yres = info->var.yres; 514 int yres = info->var.yres;
513 515
@@ -520,11 +522,11 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
520 --n; 522 --n;
521 image.dx = (xres - n * (logo->width + 8) - 8) / 2; 523 image.dx = (xres - n * (logo->width + 8) - 8) / 2;
522 image.dy = y ?: (yres - logo->height) / 2; 524 image.dy = y ?: (yres - logo->height) / 2;
525 } else {
526 image.dx = 0;
527 image.dy = y;
523 } 528 }
524#else 529
525 image.dx = 0;
526 image.dy = y;
527#endif
528 image.width = logo->width; 530 image.width = logo->width;
529 image.height = logo->height; 531 image.height = logo->height;
530 532
@@ -684,9 +686,8 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
684 } 686 }
685 687
686 height = fb_logo.logo->height; 688 height = fb_logo.logo->height;
687#ifdef CONFIG_FB_LOGO_CENTER 689 if (fb_center_logo)
688 height += (yres - fb_logo.logo->height) / 2; 690 height += (yres - fb_logo.logo->height) / 2;
689#endif
690 691
691 return fb_prepare_extra_logos(info, height, yres); 692 return fb_prepare_extra_logos(info, height, yres);
692} 693}
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index a74096c53cb5..43f2a4816860 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1446,9 +1446,9 @@ static int fb_probe(struct platform_device *device)
1446 da8xx_fb_fix.line_length - 1; 1446 da8xx_fb_fix.line_length - 1;
1447 1447
1448 /* allocate palette buffer */ 1448 /* allocate palette buffer */
1449 par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, 1449 par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE,
1450 &par->p_palette_base, 1450 &par->p_palette_base,
1451 GFP_KERNEL | GFP_DMA); 1451 GFP_KERNEL | GFP_DMA);
1452 if (!par->v_palette_base) { 1452 if (!par->v_palette_base) {
1453 dev_err(&device->dev, 1453 dev_err(&device->dev,
1454 "GLCD: kmalloc for palette buffer failed\n"); 1454 "GLCD: kmalloc for palette buffer failed\n");
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 31f769d67195..057d3cdef92e 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index,
318} 318}
319 319
320static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, 320static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp,
321 const char *name, unsigned long address) 321 unsigned long address)
322{ 322{
323 struct offb_par *par = (struct offb_par *) info->par; 323 struct offb_par *par = (struct offb_par *) info->par;
324 324
325 if (dp && !strncmp(name, "ATY,Rage128", 11)) { 325 if (of_node_name_prefix(dp, "ATY,Rage128")) {
326 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 326 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
327 if (par->cmap_adr) 327 if (par->cmap_adr)
328 par->cmap_type = cmap_r128; 328 par->cmap_type = cmap_r128;
329 } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) 329 } else if (of_node_name_prefix(dp, "ATY,RageM3pA") ||
330 || !strncmp(name, "ATY,RageM3p12A", 14))) { 330 of_node_name_prefix(dp, "ATY,RageM3p12A")) {
331 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 331 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
332 if (par->cmap_adr) 332 if (par->cmap_adr)
333 par->cmap_type = cmap_M3A; 333 par->cmap_type = cmap_M3A;
334 } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { 334 } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) {
335 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 335 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
336 if (par->cmap_adr) 336 if (par->cmap_adr)
337 par->cmap_type = cmap_M3B; 337 par->cmap_type = cmap_M3B;
338 } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { 338 } else if (of_node_name_prefix(dp, "ATY,Rage6")) {
339 par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); 339 par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff);
340 if (par->cmap_adr) 340 if (par->cmap_adr)
341 par->cmap_type = cmap_radeon; 341 par->cmap_type = cmap_radeon;
342 } else if (!strncmp(name, "ATY,", 4)) { 342 } else if (of_node_name_prefix(dp, "ATY,")) {
343 unsigned long base = address & 0xff000000UL; 343 unsigned long base = address & 0xff000000UL;
344 par->cmap_adr = 344 par->cmap_adr =
345 ioremap(base + 0x7ff000, 0x1000) + 0xcc0; 345 ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
@@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
350 par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); 350 par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
351 if (par->cmap_adr) 351 if (par->cmap_adr)
352 par->cmap_type = cmap_gxt2000; 352 par->cmap_type = cmap_gxt2000;
353 } else if (dp && !strncmp(name, "vga,Display-", 12)) { 353 } else if (of_node_name_prefix(dp, "vga,Display-")) {
354 /* Look for AVIVO initialized by SLOF */ 354 /* Look for AVIVO initialized by SLOF */
355 struct device_node *pciparent = of_get_parent(dp); 355 struct device_node *pciparent = of_get_parent(dp);
356 const u32 *vid, *did; 356 const u32 *vid, *did;
@@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name,
438 438
439 par->cmap_type = cmap_unknown; 439 par->cmap_type = cmap_unknown;
440 if (depth == 8) 440 if (depth == 8)
441 offb_init_palette_hacks(info, dp, name, address); 441 offb_init_palette_hacks(info, dp, address);
442 else 442 else
443 fix->visual = FB_VISUAL_TRUECOLOR; 443 fix->visual = FB_VISUAL_TRUECOLOR;
444 444
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 53f93616c671..8e23160ec59f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
609 609
610 int r = 0; 610 int r = 0;
611 611
612 memset(&p, 0, sizeof(p));
613
612 switch (cmd) { 614 switch (cmd) {
613 case OMAPFB_SYNC_GFX: 615 case OMAPFB_SYNC_GFX:
614 DBG("ioctl SYNC_GFX\n"); 616 DBG("ioctl SYNC_GFX\n");
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 1e972c4e88b1..d1f6196c8b9a 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,15 +10,6 @@ menuconfig LOGO
10 10
11if LOGO 11if LOGO
12 12
13config FB_LOGO_CENTER
14 bool "Center the logo"
15 depends on FB=y
16 help
17 When this option is selected, the bootup logo is centered both
18 horizontally and vertically. If more than one logo is displayed
19 due to multiple CPUs, the collected line of logos is centered
20 as a whole.
21
22config FB_LOGO_EXTRA 13config FB_LOGO_EXTRA
23 bool 14 bool
24 depends on FB=y 15 depends on FB=y
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 728ecd1eea30..fb12fe205f86 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -61,6 +61,10 @@ enum virtio_balloon_vq {
61 VIRTIO_BALLOON_VQ_MAX 61 VIRTIO_BALLOON_VQ_MAX
62}; 62};
63 63
64enum virtio_balloon_config_read {
65 VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
66};
67
64struct virtio_balloon { 68struct virtio_balloon {
65 struct virtio_device *vdev; 69 struct virtio_device *vdev;
66 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; 70 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
@@ -77,14 +81,20 @@ struct virtio_balloon {
77 /* Prevent updating balloon when it is being canceled. */ 81 /* Prevent updating balloon when it is being canceled. */
78 spinlock_t stop_update_lock; 82 spinlock_t stop_update_lock;
79 bool stop_update; 83 bool stop_update;
84 /* Bitmap to indicate if reading the related config fields are needed */
85 unsigned long config_read_bitmap;
80 86
81 /* The list of allocated free pages, waiting to be given back to mm */ 87 /* The list of allocated free pages, waiting to be given back to mm */
82 struct list_head free_page_list; 88 struct list_head free_page_list;
83 spinlock_t free_page_list_lock; 89 spinlock_t free_page_list_lock;
84 /* The number of free page blocks on the above list */ 90 /* The number of free page blocks on the above list */
85 unsigned long num_free_page_blocks; 91 unsigned long num_free_page_blocks;
86 /* The cmd id received from host */ 92 /*
87 u32 cmd_id_received; 93 * The cmd id received from host.
94 * Read it via virtio_balloon_cmd_id_received to get the latest value
95 * sent from host.
96 */
97 u32 cmd_id_received_cache;
88 /* The cmd id that is actively in use */ 98 /* The cmd id that is actively in use */
89 __virtio32 cmd_id_active; 99 __virtio32 cmd_id_active;
90 /* Buffer to store the stop sign */ 100 /* Buffer to store the stop sign */
@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
390 return num_returned; 400 return num_returned;
391} 401}
392 402
403static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
404{
405 if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
406 return;
407
408 /* No need to queue the work if the bit was already set. */
409 if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
410 &vb->config_read_bitmap))
411 return;
412
413 queue_work(vb->balloon_wq, &vb->report_free_page_work);
414}
415
393static void virtballoon_changed(struct virtio_device *vdev) 416static void virtballoon_changed(struct virtio_device *vdev)
394{ 417{
395 struct virtio_balloon *vb = vdev->priv; 418 struct virtio_balloon *vb = vdev->priv;
396 unsigned long flags; 419 unsigned long flags;
397 s64 diff = towards_target(vb);
398
399 if (diff) {
400 spin_lock_irqsave(&vb->stop_update_lock, flags);
401 if (!vb->stop_update)
402 queue_work(system_freezable_wq,
403 &vb->update_balloon_size_work);
404 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
405 }
406 420
407 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { 421 spin_lock_irqsave(&vb->stop_update_lock, flags);
408 virtio_cread(vdev, struct virtio_balloon_config, 422 if (!vb->stop_update) {
409 free_page_report_cmd_id, &vb->cmd_id_received); 423 queue_work(system_freezable_wq,
410 if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { 424 &vb->update_balloon_size_work);
411 /* Pass ULONG_MAX to give back all the free pages */ 425 virtio_balloon_queue_free_page_work(vb);
412 return_free_pages_to_mm(vb, ULONG_MAX);
413 } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
414 vb->cmd_id_received !=
415 virtio32_to_cpu(vdev, vb->cmd_id_active)) {
416 spin_lock_irqsave(&vb->stop_update_lock, flags);
417 if (!vb->stop_update) {
418 queue_work(vb->balloon_wq,
419 &vb->report_free_page_work);
420 }
421 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
422 }
423 } 426 }
427 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
424} 428}
425 429
426static void update_balloon_size(struct virtio_balloon *vb) 430static void update_balloon_size(struct virtio_balloon *vb)
@@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb)
527 return 0; 531 return 0;
528} 532}
529 533
534static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
535{
536 if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
537 &vb->config_read_bitmap))
538 virtio_cread(vb->vdev, struct virtio_balloon_config,
539 free_page_report_cmd_id,
540 &vb->cmd_id_received_cache);
541
542 return vb->cmd_id_received_cache;
543}
544
530static int send_cmd_id_start(struct virtio_balloon *vb) 545static int send_cmd_id_start(struct virtio_balloon *vb)
531{ 546{
532 struct scatterlist sg; 547 struct scatterlist sg;
@@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
537 while (virtqueue_get_buf(vq, &unused)) 552 while (virtqueue_get_buf(vq, &unused))
538 ; 553 ;
539 554
540 vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); 555 vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
556 virtio_balloon_cmd_id_received(vb));
541 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); 557 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
542 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); 558 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
543 if (!err) 559 if (!err)
@@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb)
620 * stop the reporting. 636 * stop the reporting.
621 */ 637 */
622 cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); 638 cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
623 if (cmd_id_active != vb->cmd_id_received) 639 if (unlikely(cmd_id_active !=
640 virtio_balloon_cmd_id_received(vb)))
624 break; 641 break;
625 642
626 /* 643 /*
@@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb)
637 return 0; 654 return 0;
638} 655}
639 656
640static void report_free_page_func(struct work_struct *work) 657static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
641{ 658{
642 int err; 659 int err;
643 struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
644 report_free_page_work);
645 struct device *dev = &vb->vdev->dev; 660 struct device *dev = &vb->vdev->dev;
646 661
647 /* Start by sending the received cmd id to host with an outbuf. */ 662 /* Start by sending the received cmd id to host with an outbuf. */
@@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work)
659 dev_err(dev, "Failed to send a stop id, err = %d\n", err); 674 dev_err(dev, "Failed to send a stop id, err = %d\n", err);
660} 675}
661 676
677static void report_free_page_func(struct work_struct *work)
678{
679 struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
680 report_free_page_work);
681 u32 cmd_id_received;
682
683 cmd_id_received = virtio_balloon_cmd_id_received(vb);
684 if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
685 /* Pass ULONG_MAX to give back all the free pages */
686 return_free_pages_to_mm(vb, ULONG_MAX);
687 } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
688 cmd_id_received !=
689 virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
690 virtio_balloon_report_free_page(vb);
691 }
692}
693
662#ifdef CONFIG_BALLOON_COMPACTION 694#ifdef CONFIG_BALLOON_COMPACTION
663/* 695/*
664 * virtballoon_migratepage - perform the balloon page migration on behalf of 696 * virtballoon_migratepage - perform the balloon page migration on behalf of
@@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
885 goto out_del_vqs; 917 goto out_del_vqs;
886 } 918 }
887 INIT_WORK(&vb->report_free_page_work, report_free_page_func); 919 INIT_WORK(&vb->report_free_page_work, report_free_page_func);
888 vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; 920 vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
889 vb->cmd_id_active = cpu_to_virtio32(vb->vdev, 921 vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
890 VIRTIO_BALLOON_CMD_ID_STOP); 922 VIRTIO_BALLOON_CMD_ID_STOP);
891 vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, 923 vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 4cd9ea5c75be..d9dd0f789279 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
468{ 468{
469 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 469 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
470 unsigned int irq = platform_get_irq(vm_dev->pdev, 0); 470 unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
471 int i, err; 471 int i, err, queue_idx = 0;
472 472
473 err = request_irq(irq, vm_interrupt, IRQF_SHARED, 473 err = request_irq(irq, vm_interrupt, IRQF_SHARED,
474 dev_name(&vdev->dev), vm_dev); 474 dev_name(&vdev->dev), vm_dev);
@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
476 return err; 476 return err;
477 477
478 for (i = 0; i < nvqs; ++i) { 478 for (i = 0; i < nvqs; ++i) {
479 vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], 479 if (!names[i]) {
480 vqs[i] = NULL;
481 continue;
482 }
483
484 vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
480 ctx ? ctx[i] : false); 485 ctx ? ctx[i] : false);
481 if (IS_ERR(vqs[i])) { 486 if (IS_ERR(vqs[i])) {
482 vm_del_vqs(vdev); 487 vm_del_vqs(vdev);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 465a6f5142cc..d0584c040c60 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
285{ 285{
286 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 286 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
287 u16 msix_vec; 287 u16 msix_vec;
288 int i, err, nvectors, allocated_vectors; 288 int i, err, nvectors, allocated_vectors, queue_idx = 0;
289 289
290 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); 290 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
291 if (!vp_dev->vqs) 291 if (!vp_dev->vqs)
@@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
321 msix_vec = allocated_vectors++; 321 msix_vec = allocated_vectors++;
322 else 322 else
323 msix_vec = VP_MSIX_VQ_VECTOR; 323 msix_vec = VP_MSIX_VQ_VECTOR;
324 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], 324 vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
325 ctx ? ctx[i] : false, 325 ctx ? ctx[i] : false,
326 msix_vec); 326 msix_vec);
327 if (IS_ERR(vqs[i])) { 327 if (IS_ERR(vqs[i])) {
@@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
356 const char * const names[], const bool *ctx) 356 const char * const names[], const bool *ctx)
357{ 357{
358 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 358 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
359 int i, err; 359 int i, err, queue_idx = 0;
360 360
361 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); 361 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
362 if (!vp_dev->vqs) 362 if (!vp_dev->vqs)
@@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
374 vqs[i] = NULL; 374 vqs[i] = NULL;
375 continue; 375 continue;
376 } 376 }
377 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], 377 vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
378 ctx ? ctx[i] : false, 378 ctx ? ctx[i] : false,
379 VIRTIO_MSI_NO_VECTOR); 379 VIRTIO_MSI_NO_VECTOR);
380 if (IS_ERR(vqs[i])) { 380 if (IS_ERR(vqs[i])) {
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cd7e755484e3..a0b07c331255 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -152,7 +152,12 @@ struct vring_virtqueue {
152 /* Available for packed ring */ 152 /* Available for packed ring */
153 struct { 153 struct {
154 /* Actual memory layout for this queue. */ 154 /* Actual memory layout for this queue. */
155 struct vring_packed vring; 155 struct {
156 unsigned int num;
157 struct vring_packed_desc *desc;
158 struct vring_packed_desc_event *driver;
159 struct vring_packed_desc_event *device;
160 } vring;
156 161
157 /* Driver ring wrap counter. */ 162 /* Driver ring wrap counter. */
158 bool avail_wrap_counter; 163 bool avail_wrap_counter;
@@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
1609 !context; 1614 !context;
1610 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 1615 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1611 1616
1617 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1618 vq->weak_barriers = false;
1619
1612 vq->packed.ring_dma_addr = ring_dma_addr; 1620 vq->packed.ring_dma_addr = ring_dma_addr;
1613 vq->packed.driver_event_dma_addr = driver_event_dma_addr; 1621 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1614 vq->packed.device_event_dma_addr = device_event_dma_addr; 1622 vq->packed.device_event_dma_addr = device_event_dma_addr;
@@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
2079 !context; 2087 !context;
2080 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 2088 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2081 2089
2090 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2091 vq->weak_barriers = false;
2092
2082 vq->split.queue_dma_addr = 0; 2093 vq->split.queue_dma_addr = 0;
2083 vq->split.queue_size_in_bytes = 0; 2094 vq->split.queue_size_in_bytes = 0;
2084 2095
@@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev)
2213 break; 2224 break;
2214 case VIRTIO_F_RING_PACKED: 2225 case VIRTIO_F_RING_PACKED:
2215 break; 2226 break;
2227 case VIRTIO_F_ORDER_PLATFORM:
2228 break;
2216 default: 2229 default:
2217 /* We don't understand this bit. */ 2230 /* We don't understand this bit. */
2218 __virtio_clear_bit(vdev, i); 2231 __virtio_clear_bit(vdev, i);
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 5c4a764717c4..81208cd3f4ec 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -17,6 +17,7 @@
17#include <linux/watchdog.h> 17#include <linux/watchdog.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/mod_devicetable.h>
20 21
21#include <asm/mach-ralink/ralink_regs.h> 22#include <asm/mach-ralink/ralink_regs.h>
22 23
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 98967f0a7d10..db7c57d82cfd 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -18,6 +18,7 @@
18#include <linux/watchdog.h> 18#include <linux/watchdog.h>
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/mod_devicetable.h>
21 22
22#include <asm/mach-ralink/ralink_regs.h> 23#include <asm/mach-ralink/ralink_regs.h>
23 24
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c
index 0d3a0fbbd7a5..52941207a12a 100644
--- a/drivers/watchdog/tqmx86_wdt.c
+++ b/drivers/watchdog/tqmx86_wdt.c
@@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev)
79 return -ENOMEM; 79 return -ENOMEM;
80 80
81 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 81 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
82 if (IS_ERR(res)) 82 if (!res)
83 return PTR_ERR(res); 83 return -ENODEV;
84 84
85 priv->io_base = devm_ioport_map(&pdev->dev, res->start, 85 priv->io_base = devm_ioport_map(&pdev->dev, res->start,
86 resource_size(res)); 86 resource_size(res));
87 if (IS_ERR(priv->io_base)) 87 if (!priv->io_base)
88 return PTR_ERR(priv->io_base); 88 return -ENOMEM;
89 89
90 watchdog_set_drvdata(&priv->wdd, priv); 90 watchdog_set_drvdata(&priv->wdd, priv);
91 91
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 93194f3e7540..117e76b2f939 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
1650 xen_have_vector_callback = 0; 1650 xen_have_vector_callback = 0;
1651 return; 1651 return;
1652 } 1652 }
1653 pr_info("Xen HVM callback vector for event delivery is enabled\n"); 1653 pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
1654 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, 1654 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
1655 xen_hvm_callback_vector); 1655 xen_hvm_callback_vector);
1656 } 1656 }
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 2e5d845b5091..7aa64d1b119c 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
160 160
161 /* write the data, then modify the indexes */ 161 /* write the data, then modify the indexes */
162 virt_wmb(); 162 virt_wmb();
163 if (ret < 0) 163 if (ret < 0) {
164 atomic_set(&map->read, 0);
164 intf->in_error = ret; 165 intf->in_error = ret;
165 else 166 } else
166 intf->in_prod = prod + ret; 167 intf->in_prod = prod + ret;
167 /* update the indexes, then notify the other end */ 168 /* update the indexes, then notify the other end */
168 virt_wmb(); 169 virt_wmb();
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
282static void pvcalls_sk_state_change(struct sock *sock) 283static void pvcalls_sk_state_change(struct sock *sock)
283{ 284{
284 struct sock_mapping *map = sock->sk_user_data; 285 struct sock_mapping *map = sock->sk_user_data;
285 struct pvcalls_data_intf *intf;
286 286
287 if (map == NULL) 287 if (map == NULL)
288 return; 288 return;
289 289
290 intf = map->ring; 290 atomic_inc(&map->read);
291 intf->in_error = -ENOTCONN;
292 notify_remote_via_irq(map->irq); 291 notify_remote_via_irq(map->irq);
293} 292}
294 293
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 77224d8f3e6f..8a249c95c193 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -31,6 +31,12 @@
31#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) 31#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
32#define PVCALLS_FRONT_MAX_SPIN 5000 32#define PVCALLS_FRONT_MAX_SPIN 5000
33 33
34static struct proto pvcalls_proto = {
35 .name = "PVCalls",
36 .owner = THIS_MODULE,
37 .obj_size = sizeof(struct sock),
38};
39
34struct pvcalls_bedata { 40struct pvcalls_bedata {
35 struct xen_pvcalls_front_ring ring; 41 struct xen_pvcalls_front_ring ring;
36 grant_ref_t ref; 42 grant_ref_t ref;
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
335 return ret; 341 return ret;
336} 342}
337 343
344static void free_active_ring(struct sock_mapping *map)
345{
346 if (!map->active.ring)
347 return;
348
349 free_pages((unsigned long)map->active.data.in,
350 map->active.ring->ring_order);
351 free_page((unsigned long)map->active.ring);
352}
353
354static int alloc_active_ring(struct sock_mapping *map)
355{
356 void *bytes;
357
358 map->active.ring = (struct pvcalls_data_intf *)
359 get_zeroed_page(GFP_KERNEL);
360 if (!map->active.ring)
361 goto out;
362
363 map->active.ring->ring_order = PVCALLS_RING_ORDER;
364 bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
365 PVCALLS_RING_ORDER);
366 if (!bytes)
367 goto out;
368
369 map->active.data.in = bytes;
370 map->active.data.out = bytes +
371 XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
372
373 return 0;
374
375out:
376 free_active_ring(map);
377 return -ENOMEM;
378}
379
338static int create_active(struct sock_mapping *map, int *evtchn) 380static int create_active(struct sock_mapping *map, int *evtchn)
339{ 381{
340 void *bytes; 382 void *bytes;
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
343 *evtchn = -1; 385 *evtchn = -1;
344 init_waitqueue_head(&map->active.inflight_conn_req); 386 init_waitqueue_head(&map->active.inflight_conn_req);
345 387
346 map->active.ring = (struct pvcalls_data_intf *) 388 bytes = map->active.data.in;
347 __get_free_page(GFP_KERNEL | __GFP_ZERO);
348 if (map->active.ring == NULL)
349 goto out_error;
350 map->active.ring->ring_order = PVCALLS_RING_ORDER;
351 bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
352 PVCALLS_RING_ORDER);
353 if (bytes == NULL)
354 goto out_error;
355 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) 389 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
356 map->active.ring->ref[i] = gnttab_grant_foreign_access( 390 map->active.ring->ref[i] = gnttab_grant_foreign_access(
357 pvcalls_front_dev->otherend_id, 391 pvcalls_front_dev->otherend_id,
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
361 pvcalls_front_dev->otherend_id, 395 pvcalls_front_dev->otherend_id,
362 pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); 396 pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
363 397
364 map->active.data.in = bytes;
365 map->active.data.out = bytes +
366 XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
367
368 ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); 398 ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
369 if (ret) 399 if (ret)
370 goto out_error; 400 goto out_error;
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
385out_error: 415out_error:
386 if (*evtchn >= 0) 416 if (*evtchn >= 0)
387 xenbus_free_evtchn(pvcalls_front_dev, *evtchn); 417 xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
388 free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
389 free_page((unsigned long)map->active.ring);
390 return ret; 418 return ret;
391} 419}
392 420
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
406 return PTR_ERR(map); 434 return PTR_ERR(map);
407 435
408 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 436 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
437 ret = alloc_active_ring(map);
438 if (ret < 0) {
439 pvcalls_exit_sock(sock);
440 return ret;
441 }
409 442
410 spin_lock(&bedata->socket_lock); 443 spin_lock(&bedata->socket_lock);
411 ret = get_request(bedata, &req_id); 444 ret = get_request(bedata, &req_id);
412 if (ret < 0) { 445 if (ret < 0) {
413 spin_unlock(&bedata->socket_lock); 446 spin_unlock(&bedata->socket_lock);
447 free_active_ring(map);
414 pvcalls_exit_sock(sock); 448 pvcalls_exit_sock(sock);
415 return ret; 449 return ret;
416 } 450 }
417 ret = create_active(map, &evtchn); 451 ret = create_active(map, &evtchn);
418 if (ret < 0) { 452 if (ret < 0) {
419 spin_unlock(&bedata->socket_lock); 453 spin_unlock(&bedata->socket_lock);
454 free_active_ring(map);
420 pvcalls_exit_sock(sock); 455 pvcalls_exit_sock(sock);
421 return ret; 456 return ret;
422 } 457 }
@@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf,
469 virt_mb(); 504 virt_mb();
470 505
471 size = pvcalls_queued(prod, cons, array_size); 506 size = pvcalls_queued(prod, cons, array_size);
472 if (size >= array_size) 507 if (size > array_size)
473 return -EINVAL; 508 return -EINVAL;
509 if (size == array_size)
510 return 0;
474 if (len > array_size - size) 511 if (len > array_size - size)
475 len = array_size - size; 512 len = array_size - size;
476 513
@@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
560 error = intf->in_error; 597 error = intf->in_error;
561 /* get pointers before reading from the ring */ 598 /* get pointers before reading from the ring */
562 virt_rmb(); 599 virt_rmb();
563 if (error < 0)
564 return error;
565 600
566 size = pvcalls_queued(prod, cons, array_size); 601 size = pvcalls_queued(prod, cons, array_size);
567 masked_prod = pvcalls_mask(prod, array_size); 602 masked_prod = pvcalls_mask(prod, array_size);
568 masked_cons = pvcalls_mask(cons, array_size); 603 masked_cons = pvcalls_mask(cons, array_size);
569 604
570 if (size == 0) 605 if (size == 0)
571 return 0; 606 return error ?: size;
572 607
573 if (len > size) 608 if (len > size)
574 len = size; 609 len = size;
@@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
780 } 815 }
781 } 816 }
782 817
783 spin_lock(&bedata->socket_lock); 818 map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
784 ret = get_request(bedata, &req_id); 819 if (map2 == NULL) {
785 if (ret < 0) {
786 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 820 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
787 (void *)&map->passive.flags); 821 (void *)&map->passive.flags);
788 spin_unlock(&bedata->socket_lock); 822 pvcalls_exit_sock(sock);
823 return -ENOMEM;
824 }
825 ret = alloc_active_ring(map2);
826 if (ret < 0) {
827 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
828 (void *)&map->passive.flags);
829 kfree(map2);
789 pvcalls_exit_sock(sock); 830 pvcalls_exit_sock(sock);
790 return ret; 831 return ret;
791 } 832 }
792 map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); 833 spin_lock(&bedata->socket_lock);
793 if (map2 == NULL) { 834 ret = get_request(bedata, &req_id);
835 if (ret < 0) {
794 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 836 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
795 (void *)&map->passive.flags); 837 (void *)&map->passive.flags);
796 spin_unlock(&bedata->socket_lock); 838 spin_unlock(&bedata->socket_lock);
839 free_active_ring(map2);
840 kfree(map2);
797 pvcalls_exit_sock(sock); 841 pvcalls_exit_sock(sock);
798 return -ENOMEM; 842 return ret;
799 } 843 }
844
800 ret = create_active(map2, &evtchn); 845 ret = create_active(map2, &evtchn);
801 if (ret < 0) { 846 if (ret < 0) {
847 free_active_ring(map2);
802 kfree(map2); 848 kfree(map2);
803 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 849 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
804 (void *)&map->passive.flags); 850 (void *)&map->passive.flags);
@@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
839 885
840received: 886received:
841 map2->sock = newsock; 887 map2->sock = newsock;
842 newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL); 888 newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
843 if (!newsock->sk) { 889 if (!newsock->sk) {
844 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; 890 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
845 map->passive.inflight_req_id = PVCALLS_INVALID_ID; 891 map->passive.inflight_req_id = PVCALLS_INVALID_ID;
@@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock)
1032 spin_lock(&bedata->socket_lock); 1078 spin_lock(&bedata->socket_lock);
1033 list_del(&map->list); 1079 list_del(&map->list);
1034 spin_unlock(&bedata->socket_lock); 1080 spin_unlock(&bedata->socket_lock);
1035 if (READ_ONCE(map->passive.inflight_req_id) != 1081 if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
1036 PVCALLS_INVALID_ID) { 1082 READ_ONCE(map->passive.inflight_req_id) != 0) {
1037 pvcalls_front_free_map(bedata, 1083 pvcalls_front_free_map(bedata,
1038 map->passive.accept_map); 1084 map->passive.accept_map);
1039 } 1085 }
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 989cf872b98c..bb7888429be6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -645,7 +645,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
645 void *cpu_addr, dma_addr_t dma_addr, size_t size, 645 void *cpu_addr, dma_addr_t dma_addr, size_t size,
646 unsigned long attrs) 646 unsigned long attrs)
647{ 647{
648#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 648#ifdef CONFIG_ARM
649 if (xen_get_dma_ops(dev)->mmap) 649 if (xen_get_dma_ops(dev)->mmap)
650 return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr, 650 return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
651 dma_addr, size, attrs); 651 dma_addr, size, attrs);
@@ -662,7 +662,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
662 void *cpu_addr, dma_addr_t handle, size_t size, 662 void *cpu_addr, dma_addr_t handle, size_t size,
663 unsigned long attrs) 663 unsigned long attrs)
664{ 664{
665#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 665#ifdef CONFIG_ARM
666 if (xen_get_dma_ops(dev)->get_sgtable) { 666 if (xen_get_dma_ops(dev)->get_sgtable) {
667#if 0 667#if 0
668 /* 668 /*
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 0568fd986821..e432bd27a2e7 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -208,7 +208,7 @@ again:
208 /* The new front of the queue now owns the state variables. */ 208 /* The new front of the queue now owns the state variables. */
209 next = list_entry(vnode->pending_locks.next, 209 next = list_entry(vnode->pending_locks.next,
210 struct file_lock, fl_u.afs.link); 210 struct file_lock, fl_u.afs.link);
211 vnode->lock_key = afs_file_key(next->fl_file); 211 vnode->lock_key = key_get(afs_file_key(next->fl_file));
212 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 212 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
213 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 213 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
214 goto again; 214 goto again;
@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
413 /* The new front of the queue now owns the state variables. */ 413 /* The new front of the queue now owns the state variables. */
414 next = list_entry(vnode->pending_locks.next, 414 next = list_entry(vnode->pending_locks.next,
415 struct file_lock, fl_u.afs.link); 415 struct file_lock, fl_u.afs.link);
416 vnode->lock_key = afs_file_key(next->fl_file); 416 vnode->lock_key = key_get(afs_file_key(next->fl_file));
417 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 417 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
418 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 418 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
419 afs_lock_may_be_available(vnode); 419 afs_lock_may_be_available(vnode);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 6b17d3620414..1a4ce07fb406 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
414 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { 414 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
415 valid = true; 415 valid = true;
416 } else { 416 } else {
417 vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
418 vnode->cb_v_break = vnode->volume->cb_v_break; 417 vnode->cb_v_break = vnode->volume->cb_v_break;
419 valid = false; 418 valid = false;
420 } 419 }
@@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode)
546#endif 545#endif
547 546
548 afs_put_permits(rcu_access_pointer(vnode->permit_cache)); 547 afs_put_permits(rcu_access_pointer(vnode->permit_cache));
548 key_put(vnode->lock_key);
549 vnode->lock_key = NULL;
549 _leave(""); 550 _leave("");
550} 551}
551 552
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index 07bc10f076aa..d443e2bfa094 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus {
161 struct yfs_xdr_u64 max_quota; 161 struct yfs_xdr_u64 max_quota;
162 struct yfs_xdr_u64 file_quota; 162 struct yfs_xdr_u64 file_quota;
163} __packed; 163} __packed;
164
165enum yfs_lock_type {
166 yfs_LockNone = -1,
167 yfs_LockRead = 0,
168 yfs_LockWrite = 1,
169 yfs_LockExtend = 2,
170 yfs_LockRelease = 3,
171 yfs_LockMandatoryRead = 0x100,
172 yfs_LockMandatoryWrite = 0x101,
173 yfs_LockMandatoryExtend = 0x102,
174};
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a7b44863d502..2c588f9bbbda 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls;
23static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); 23static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
24static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); 24static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
25static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); 25static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
26static void afs_delete_async_call(struct work_struct *);
26static void afs_process_async_call(struct work_struct *); 27static void afs_process_async_call(struct work_struct *);
27static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); 28static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
28static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); 29static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
@@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call)
203 } 204 }
204} 205}
205 206
207static struct afs_call *afs_get_call(struct afs_call *call,
208 enum afs_call_trace why)
209{
210 int u = atomic_inc_return(&call->usage);
211
212 trace_afs_call(call, why, u,
213 atomic_read(&call->net->nr_outstanding_calls),
214 __builtin_return_address(0));
215 return call;
216}
217
206/* 218/*
207 * Queue the call for actual work. 219 * Queue the call for actual work.
208 */ 220 */
209static void afs_queue_call_work(struct afs_call *call) 221static void afs_queue_call_work(struct afs_call *call)
210{ 222{
211 if (call->type->work) { 223 if (call->type->work) {
212 int u = atomic_inc_return(&call->usage);
213
214 trace_afs_call(call, afs_call_trace_work, u,
215 atomic_read(&call->net->nr_outstanding_calls),
216 __builtin_return_address(0));
217
218 INIT_WORK(&call->work, call->type->work); 224 INIT_WORK(&call->work, call->type->work);
219 225
226 afs_get_call(call, afs_call_trace_work);
220 if (!queue_work(afs_wq, &call->work)) 227 if (!queue_work(afs_wq, &call->work))
221 afs_put_call(call); 228 afs_put_call(call);
222 } 229 }
@@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
398 } 405 }
399 } 406 }
400 407
408 /* If the call is going to be asynchronous, we need an extra ref for
409 * the call to hold itself so the caller need not hang on to its ref.
410 */
411 if (call->async)
412 afs_get_call(call, afs_call_trace_get);
413
401 /* create a call */ 414 /* create a call */
402 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, 415 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
403 (unsigned long)call, 416 (unsigned long)call,
@@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
438 goto error_do_abort; 451 goto error_do_abort;
439 } 452 }
440 453
441 /* at this point, an async call may no longer exist as it may have 454 /* Note that at this point, we may have received the reply or an abort
442 * already completed */ 455 * - and an asynchronous call may already have completed.
443 if (call->async) 456 */
457 if (call->async) {
458 afs_put_call(call);
444 return -EINPROGRESS; 459 return -EINPROGRESS;
460 }
445 461
446 return afs_wait_for_call_to_complete(call, ac); 462 return afs_wait_for_call_to_complete(call, ac);
447 463
448error_do_abort: 464error_do_abort:
449 call->state = AFS_CALL_COMPLETE;
450 if (ret != -ECONNABORTED) { 465 if (ret != -ECONNABORTED) {
451 rxrpc_kernel_abort_call(call->net->socket, rxcall, 466 rxrpc_kernel_abort_call(call->net->socket, rxcall,
452 RX_USER_ABORT, ret, "KSD"); 467 RX_USER_ABORT, ret, "KSD");
@@ -463,8 +478,24 @@ error_do_abort:
463error_kill_call: 478error_kill_call:
464 if (call->type->done) 479 if (call->type->done)
465 call->type->done(call); 480 call->type->done(call);
466 afs_put_call(call); 481
482 /* We need to dispose of the extra ref we grabbed for an async call.
483 * The call, however, might be queued on afs_async_calls and we need to
484 * make sure we don't get any more notifications that might requeue it.
485 */
486 if (call->rxcall) {
487 rxrpc_kernel_end_call(call->net->socket, call->rxcall);
488 call->rxcall = NULL;
489 }
490 if (call->async) {
491 if (cancel_work_sync(&call->async_work))
492 afs_put_call(call);
493 afs_put_call(call);
494 }
495
467 ac->error = ret; 496 ac->error = ret;
497 call->state = AFS_CALL_COMPLETE;
498 afs_put_call(call);
468 _leave(" = %d", ret); 499 _leave(" = %d", ret);
469 return ret; 500 return ret;
470} 501}
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 95d0761cdb34..155dc14caef9 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -42,9 +42,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
42 if (vldb->fs_mask[i] & type_mask) 42 if (vldb->fs_mask[i] & type_mask)
43 nr_servers++; 43 nr_servers++;
44 44
45 slist = kzalloc(sizeof(struct afs_server_list) + 45 slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
46 sizeof(struct afs_server_entry) * nr_servers,
47 GFP_KERNEL);
48 if (!slist) 46 if (!slist)
49 goto error; 47 goto error;
50 48
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 12658c1363ae..5aa57929e8c2 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
803 bp = xdr_encode_YFSFid(bp, &vnode->fid); 803 bp = xdr_encode_YFSFid(bp, &vnode->fid);
804 bp = xdr_encode_string(bp, name, namesz); 804 bp = xdr_encode_string(bp, name, namesz);
805 bp = xdr_encode_YFSStoreStatus_mode(bp, mode); 805 bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
806 bp = xdr_encode_u32(bp, 0); /* ViceLockType */ 806 bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
807 yfs_check_req(call, bp); 807 yfs_check_req(call, bp);
808 808
809 afs_use_fs_server(call, fc->cbi); 809 afs_use_fs_server(call, fc->cbi);
diff --git a/fs/aio.c b/fs/aio.c
index b906ff70c90f..aaaaf4d12c73 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1436 if (unlikely(!req->ki_filp)) 1436 if (unlikely(!req->ki_filp))
1437 return -EBADF; 1437 return -EBADF;
1438 req->ki_complete = aio_complete_rw; 1438 req->ki_complete = aio_complete_rw;
1439 req->private = NULL;
1439 req->ki_pos = iocb->aio_offset; 1440 req->ki_pos = iocb->aio_offset;
1440 req->ki_flags = iocb_flags(req->ki_filp); 1441 req->ki_flags = iocb_flags(req->ki_filp);
1441 if (iocb->aio_flags & IOCB_FLAG_RESFD) 1442 if (iocb->aio_flags & IOCB_FLAG_RESFD)
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index d441244b79df..28d9c2b1b3bb 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb,
596 pkt.len = dentry->d_name.len; 596 pkt.len = dentry->d_name.len;
597 memcpy(pkt.name, dentry->d_name.name, pkt.len); 597 memcpy(pkt.name, dentry->d_name.name, pkt.len);
598 pkt.name[pkt.len] = '\0'; 598 pkt.name[pkt.len] = '\0';
599 dput(dentry);
600 599
601 if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire))) 600 if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
602 ret = -EFAULT; 601 ret = -EFAULT;
@@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb,
609 complete_all(&ino->expire_complete); 608 complete_all(&ino->expire_complete);
610 spin_unlock(&sbi->fs_lock); 609 spin_unlock(&sbi->fs_lock);
611 610
611 dput(dentry);
612
612 return ret; 613 return ret;
613} 614}
614 615
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 0e8ea2d9a2bb..078992eee299 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -266,8 +266,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
266 } 266 }
267 root_inode = autofs_get_inode(s, S_IFDIR | 0755); 267 root_inode = autofs_get_inode(s, S_IFDIR | 0755);
268 root = d_make_root(root_inode); 268 root = d_make_root(root_inode);
269 if (!root) 269 if (!root) {
270 ret = -ENOMEM;
270 goto fail_ino; 271 goto fail_ino;
272 }
271 pipe = NULL; 273 pipe = NULL;
272 274
273 root->d_fsdata = ino; 275 root->d_fsdata = ino;
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index d0078cbb718b..e996174cbfc0 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -14,13 +14,30 @@
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16 16
17static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
18static inline char *next_non_spacetab(char *first, const char *last)
19{
20 for (; first <= last; first++)
21 if (!spacetab(*first))
22 return first;
23 return NULL;
24}
25static inline char *next_terminator(char *first, const char *last)
26{
27 for (; first <= last; first++)
28 if (spacetab(*first) || !*first)
29 return first;
30 return NULL;
31}
32
17static int load_script(struct linux_binprm *bprm) 33static int load_script(struct linux_binprm *bprm)
18{ 34{
19 const char *i_arg, *i_name; 35 const char *i_arg, *i_name;
20 char *cp; 36 char *cp, *buf_end;
21 struct file *file; 37 struct file *file;
22 int retval; 38 int retval;
23 39
40 /* Not ours to exec if we don't start with "#!". */
24 if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) 41 if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
25 return -ENOEXEC; 42 return -ENOEXEC;
26 43
@@ -33,23 +50,41 @@ static int load_script(struct linux_binprm *bprm)
33 if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) 50 if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
34 return -ENOENT; 51 return -ENOENT;
35 52
36 /* 53 /* Release since we are not mapping a binary into memory. */
37 * This section does the #! interpretation.
38 * Sorta complicated, but hopefully it will work. -TYT
39 */
40
41 allow_write_access(bprm->file); 54 allow_write_access(bprm->file);
42 fput(bprm->file); 55 fput(bprm->file);
43 bprm->file = NULL; 56 bprm->file = NULL;
44 57
45 for (cp = bprm->buf+2;; cp++) { 58 /*
46 if (cp >= bprm->buf + BINPRM_BUF_SIZE) 59 * This section handles parsing the #! line into separate
60 * interpreter path and argument strings. We must be careful
61 * because bprm->buf is not yet guaranteed to be NUL-terminated
62 * (though the buffer will have trailing NUL padding when the
63 * file size was smaller than the buffer size).
64 *
65 * We do not want to exec a truncated interpreter path, so either
66 * we find a newline (which indicates nothing is truncated), or
67 * we find a space/tab/NUL after the interpreter path (which
68 * itself may be preceded by spaces/tabs). Truncating the
69 * arguments is fine: the interpreter can re-read the script to
70 * parse them on its own.
71 */
72 buf_end = bprm->buf + sizeof(bprm->buf) - 1;
73 cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
74 if (!cp) {
75 cp = next_non_spacetab(bprm->buf + 2, buf_end);
76 if (!cp)
77 return -ENOEXEC; /* Entire buf is spaces/tabs */
78 /*
79 * If there is no later space/tab/NUL we must assume the
80 * interpreter path is truncated.
81 */
82 if (!next_terminator(cp, buf_end))
47 return -ENOEXEC; 83 return -ENOEXEC;
48 if (!*cp || (*cp == '\n')) 84 cp = buf_end;
49 break;
50 } 85 }
86 /* NUL-terminate the buffer and any trailing spaces/tabs. */
51 *cp = '\0'; 87 *cp = '\0';
52
53 while (cp > bprm->buf) { 88 while (cp > bprm->buf) {
54 cp--; 89 cp--;
55 if ((*cp == ' ') || (*cp == '\t')) 90 if ((*cp == ' ') || (*cp == '\t'))
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c546cdce77e6..58a4c1217fa8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
104} 104}
105EXPORT_SYMBOL(invalidate_bdev); 105EXPORT_SYMBOL(invalidate_bdev);
106 106
107static void set_init_blocksize(struct block_device *bdev)
108{
109 unsigned bsize = bdev_logical_block_size(bdev);
110 loff_t size = i_size_read(bdev->bd_inode);
111
112 while (bsize < PAGE_SIZE) {
113 if (size & bsize)
114 break;
115 bsize <<= 1;
116 }
117 bdev->bd_block_size = bsize;
118 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
119}
120
107int set_blocksize(struct block_device *bdev, int size) 121int set_blocksize(struct block_device *bdev, int size)
108{ 122{
109 /* Size must be a power of two, and between 512 and PAGE_SIZE */ 123 /* Size must be a power of two, and between 512 and PAGE_SIZE */
@@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change);
1431 1445
1432void bd_set_size(struct block_device *bdev, loff_t size) 1446void bd_set_size(struct block_device *bdev, loff_t size)
1433{ 1447{
1434 unsigned bsize = bdev_logical_block_size(bdev);
1435
1436 inode_lock(bdev->bd_inode); 1448 inode_lock(bdev->bd_inode);
1437 i_size_write(bdev->bd_inode, size); 1449 i_size_write(bdev->bd_inode, size);
1438 inode_unlock(bdev->bd_inode); 1450 inode_unlock(bdev->bd_inode);
1439 while (bsize < PAGE_SIZE) {
1440 if (size & bsize)
1441 break;
1442 bsize <<= 1;
1443 }
1444 bdev->bd_block_size = bsize;
1445 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
1446} 1451}
1447EXPORT_SYMBOL(bd_set_size); 1452EXPORT_SYMBOL(bd_set_size);
1448 1453
@@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1519 } 1524 }
1520 } 1525 }
1521 1526
1522 if (!ret) 1527 if (!ret) {
1523 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); 1528 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
1529 set_init_blocksize(bdev);
1530 }
1524 1531
1525 /* 1532 /*
1526 * If the device is invalidated, rescan partition 1533 * If the device is invalidated, rescan partition
@@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1555 goto out_clear; 1562 goto out_clear;
1556 } 1563 }
1557 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); 1564 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
1565 set_init_blocksize(bdev);
1558 } 1566 }
1559 1567
1560 if (bdev->bd_bdi == &noop_backing_dev_info) 1568 if (bdev->bd_bdi == &noop_backing_dev_info)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d92462fe66c8..5a6c39b44c84 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -968,6 +968,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
968 return 0; 968 return 0;
969} 969}
970 970
971static struct extent_buffer *alloc_tree_block_no_bg_flush(
972 struct btrfs_trans_handle *trans,
973 struct btrfs_root *root,
974 u64 parent_start,
975 const struct btrfs_disk_key *disk_key,
976 int level,
977 u64 hint,
978 u64 empty_size)
979{
980 struct btrfs_fs_info *fs_info = root->fs_info;
981 struct extent_buffer *ret;
982
983 /*
984 * If we are COWing a node/leaf from the extent, chunk, device or free
985 * space trees, make sure that we do not finish block group creation of
986 * pending block groups. We do this to avoid a deadlock.
987 * COWing can result in allocation of a new chunk, and flushing pending
988 * block groups (btrfs_create_pending_block_groups()) can be triggered
989 * when finishing allocation of a new chunk. Creation of a pending block
990 * group modifies the extent, chunk, device and free space trees,
991 * therefore we could deadlock with ourselves since we are holding a
992 * lock on an extent buffer that btrfs_create_pending_block_groups() may
993 * try to COW later.
994 * For similar reasons, we also need to delay flushing pending block
995 * groups when splitting a leaf or node, from one of those trees, since
996 * we are holding a write lock on it and its parent or when inserting a
997 * new root node for one of those trees.
998 */
999 if (root == fs_info->extent_root ||
1000 root == fs_info->chunk_root ||
1001 root == fs_info->dev_root ||
1002 root == fs_info->free_space_root)
1003 trans->can_flush_pending_bgs = false;
1004
1005 ret = btrfs_alloc_tree_block(trans, root, parent_start,
1006 root->root_key.objectid, disk_key, level,
1007 hint, empty_size);
1008 trans->can_flush_pending_bgs = true;
1009
1010 return ret;
1011}
1012
971/* 1013/*
972 * does the dirty work in cow of a single block. The parent block (if 1014 * does the dirty work in cow of a single block. The parent block (if
973 * supplied) is updated to point to the new cow copy. The new buffer is marked 1015 * supplied) is updated to point to the new cow copy. The new buffer is marked
@@ -1015,26 +1057,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1015 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) 1057 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1016 parent_start = parent->start; 1058 parent_start = parent->start;
1017 1059
1018 /* 1060 cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
1019 * If we are COWing a node/leaf from the extent, chunk or device trees, 1061 level, search_start, empty_size);
1020 * make sure that we do not finish block group creation of pending block
1021 * groups. We do this to avoid a deadlock.
1022 * COWing can result in allocation of a new chunk, and flushing pending
1023 * block groups (btrfs_create_pending_block_groups()) can be triggered
1024 * when finishing allocation of a new chunk. Creation of a pending block
1025 * group modifies the extent, chunk and device trees, therefore we could
1026 * deadlock with ourselves since we are holding a lock on an extent
1027 * buffer that btrfs_create_pending_block_groups() may try to COW later.
1028 */
1029 if (root == fs_info->extent_root ||
1030 root == fs_info->chunk_root ||
1031 root == fs_info->dev_root)
1032 trans->can_flush_pending_bgs = false;
1033
1034 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1035 root->root_key.objectid, &disk_key, level,
1036 search_start, empty_size);
1037 trans->can_flush_pending_bgs = true;
1038 if (IS_ERR(cow)) 1062 if (IS_ERR(cow))
1039 return PTR_ERR(cow); 1063 return PTR_ERR(cow);
1040 1064
@@ -3343,8 +3367,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3343 else 3367 else
3344 btrfs_node_key(lower, &lower_key, 0); 3368 btrfs_node_key(lower, &lower_key, 0);
3345 3369
3346 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3370 c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
3347 &lower_key, level, root->node->start, 0); 3371 root->node->start, 0);
3348 if (IS_ERR(c)) 3372 if (IS_ERR(c))
3349 return PTR_ERR(c); 3373 return PTR_ERR(c);
3350 3374
@@ -3473,8 +3497,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
3473 mid = (c_nritems + 1) / 2; 3497 mid = (c_nritems + 1) / 2;
3474 btrfs_node_key(c, &disk_key, mid); 3498 btrfs_node_key(c, &disk_key, mid);
3475 3499
3476 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3500 split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
3477 &disk_key, level, c->start, 0); 3501 c->start, 0);
3478 if (IS_ERR(split)) 3502 if (IS_ERR(split))
3479 return PTR_ERR(split); 3503 return PTR_ERR(split);
3480 3504
@@ -4258,8 +4282,8 @@ again:
4258 else 4282 else
4259 btrfs_item_key(l, &disk_key, mid); 4283 btrfs_item_key(l, &disk_key, mid);
4260 4284
4261 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 4285 right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
4262 &disk_key, 0, l->start, 0); 4286 l->start, 0);
4263 if (IS_ERR(right)) 4287 if (IS_ERR(right))
4264 return PTR_ERR(right); 4288 return PTR_ERR(right);
4265 4289
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0a68cf7032f5..7a2a2621f0d9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -35,6 +35,7 @@
35struct btrfs_trans_handle; 35struct btrfs_trans_handle;
36struct btrfs_transaction; 36struct btrfs_transaction;
37struct btrfs_pending_snapshot; 37struct btrfs_pending_snapshot;
38struct btrfs_delayed_ref_root;
38extern struct kmem_cache *btrfs_trans_handle_cachep; 39extern struct kmem_cache *btrfs_trans_handle_cachep;
39extern struct kmem_cache *btrfs_bit_radix_cachep; 40extern struct kmem_cache *btrfs_bit_radix_cachep;
40extern struct kmem_cache *btrfs_path_cachep; 41extern struct kmem_cache *btrfs_path_cachep;
@@ -786,6 +787,9 @@ enum {
786 * main phase. The fs_info::balance_ctl is initialized. 787 * main phase. The fs_info::balance_ctl is initialized.
787 */ 788 */
788 BTRFS_FS_BALANCE_RUNNING, 789 BTRFS_FS_BALANCE_RUNNING,
790
791 /* Indicate that the cleaner thread is awake and doing something. */
792 BTRFS_FS_CLEANER_RUNNING,
789}; 793};
790 794
791struct btrfs_fs_info { 795struct btrfs_fs_info {
@@ -2661,6 +2665,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2661 unsigned long count); 2665 unsigned long count);
2662int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, 2666int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
2663 unsigned long count, u64 transid, int wait); 2667 unsigned long count, u64 transid, int wait);
2668void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
2669 struct btrfs_delayed_ref_root *delayed_refs,
2670 struct btrfs_delayed_ref_head *head);
2664int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); 2671int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
2665int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 2672int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
2666 struct btrfs_fs_info *fs_info, u64 bytenr, 2673 struct btrfs_fs_info *fs_info, u64 bytenr,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8da2f380d3c0..6a2a2a951705 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1682,6 +1682,8 @@ static int cleaner_kthread(void *arg)
1682 while (1) { 1682 while (1) {
1683 again = 0; 1683 again = 0;
1684 1684
1685 set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1686
1685 /* Make the cleaner go to sleep early. */ 1687 /* Make the cleaner go to sleep early. */
1686 if (btrfs_need_cleaner_sleep(fs_info)) 1688 if (btrfs_need_cleaner_sleep(fs_info))
1687 goto sleep; 1689 goto sleep;
@@ -1728,6 +1730,7 @@ static int cleaner_kthread(void *arg)
1728 */ 1730 */
1729 btrfs_delete_unused_bgs(fs_info); 1731 btrfs_delete_unused_bgs(fs_info);
1730sleep: 1732sleep:
1733 clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1731 if (kthread_should_park()) 1734 if (kthread_should_park())
1732 kthread_parkme(); 1735 kthread_parkme();
1733 if (kthread_should_stop()) 1736 if (kthread_should_stop())
@@ -4201,6 +4204,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4201 spin_lock(&fs_info->ordered_root_lock); 4204 spin_lock(&fs_info->ordered_root_lock);
4202 } 4205 }
4203 spin_unlock(&fs_info->ordered_root_lock); 4206 spin_unlock(&fs_info->ordered_root_lock);
4207
4208 /*
4209 * We need this here because if we've been flipped read-only we won't
4210 * get sync() from the umount, so we need to make sure any ordered
4211 * extents that haven't had their dirty pages IO start writeout yet
4212 * actually get run and error out properly.
4213 */
4214 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4204} 4215}
4205 4216
4206static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 4217static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
@@ -4265,6 +4276,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4265 if (pin_bytes) 4276 if (pin_bytes)
4266 btrfs_pin_extent(fs_info, head->bytenr, 4277 btrfs_pin_extent(fs_info, head->bytenr,
4267 head->num_bytes, 1); 4278 head->num_bytes, 1);
4279 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4268 btrfs_put_delayed_ref_head(head); 4280 btrfs_put_delayed_ref_head(head);
4269 cond_resched(); 4281 cond_resched();
4270 spin_lock(&delayed_refs->lock); 4282 spin_lock(&delayed_refs->lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b15afeae16df..d81035b7ea7d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2456,12 +2456,10 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
2456 return ret ? ret : 1; 2456 return ret ? ret : 1;
2457} 2457}
2458 2458
2459static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans, 2459void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
2460 struct btrfs_delayed_ref_head *head) 2460 struct btrfs_delayed_ref_root *delayed_refs,
2461 struct btrfs_delayed_ref_head *head)
2461{ 2462{
2462 struct btrfs_fs_info *fs_info = trans->fs_info;
2463 struct btrfs_delayed_ref_root *delayed_refs =
2464 &trans->transaction->delayed_refs;
2465 int nr_items = 1; /* Dropping this ref head update. */ 2463 int nr_items = 1; /* Dropping this ref head update. */
2466 2464
2467 if (head->total_ref_mod < 0) { 2465 if (head->total_ref_mod < 0) {
@@ -2544,7 +2542,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
2544 } 2542 }
2545 } 2543 }
2546 2544
2547 cleanup_ref_head_accounting(trans, head); 2545 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
2548 2546
2549 trace_run_delayed_ref_head(fs_info, head, 0); 2547 trace_run_delayed_ref_head(fs_info, head, 0);
2550 btrfs_delayed_ref_unlock(head); 2548 btrfs_delayed_ref_unlock(head);
@@ -4954,6 +4952,15 @@ static void flush_space(struct btrfs_fs_info *fs_info,
4954 ret = 0; 4952 ret = 0;
4955 break; 4953 break;
4956 case COMMIT_TRANS: 4954 case COMMIT_TRANS:
4955 /*
4956 * If we have pending delayed iputs then we could free up a
4957 * bunch of pinned space, so make sure we run the iputs before
4958 * we do our pinned bytes check below.
4959 */
4960 mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
4961 btrfs_run_delayed_iputs(fs_info);
4962 mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
4963
4957 ret = may_commit_transaction(fs_info, space_info); 4964 ret = may_commit_transaction(fs_info, space_info);
4958 break; 4965 break;
4959 default: 4966 default:
@@ -7188,7 +7195,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
7188 if (head->must_insert_reserved) 7195 if (head->must_insert_reserved)
7189 ret = 1; 7196 ret = 1;
7190 7197
7191 cleanup_ref_head_accounting(trans, head); 7198 btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
7192 mutex_unlock(&head->mutex); 7199 mutex_unlock(&head->mutex);
7193 btrfs_put_delayed_ref_head(head); 7200 btrfs_put_delayed_ref_head(head);
7194 return ret; 7201 return ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 43eb4535319d..5c349667c761 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3129,9 +3129,6 @@ out:
3129 /* once for the tree */ 3129 /* once for the tree */
3130 btrfs_put_ordered_extent(ordered_extent); 3130 btrfs_put_ordered_extent(ordered_extent);
3131 3131
3132 /* Try to release some metadata so we don't get an OOM but don't wait */
3133 btrfs_btree_balance_dirty_nodelay(fs_info);
3134
3135 return ret; 3132 return ret;
3136} 3133}
3137 3134
@@ -3254,6 +3251,8 @@ void btrfs_add_delayed_iput(struct inode *inode)
3254 ASSERT(list_empty(&binode->delayed_iput)); 3251 ASSERT(list_empty(&binode->delayed_iput));
3255 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); 3252 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3256 spin_unlock(&fs_info->delayed_iput_lock); 3253 spin_unlock(&fs_info->delayed_iput_lock);
3254 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3255 wake_up_process(fs_info->cleaner_kthread);
3257} 3256}
3258 3257
3259void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3258void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index fab9443f6a42..9c8e1734429c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3221,6 +3221,26 @@ static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
3221 inode_lock_nested(inode2, I_MUTEX_CHILD); 3221 inode_lock_nested(inode2, I_MUTEX_CHILD);
3222} 3222}
3223 3223
3224static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
3225 struct inode *inode2, u64 loff2, u64 len)
3226{
3227 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3228 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
3229}
3230
3231static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
3232 struct inode *inode2, u64 loff2, u64 len)
3233{
3234 if (inode1 < inode2) {
3235 swap(inode1, inode2);
3236 swap(loff1, loff2);
3237 } else if (inode1 == inode2 && loff2 < loff1) {
3238 swap(loff1, loff2);
3239 }
3240 lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3241 lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
3242}
3243
3224static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen, 3244static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3225 struct inode *dst, u64 dst_loff) 3245 struct inode *dst, u64 dst_loff)
3226{ 3246{
@@ -3242,11 +3262,12 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3242 return -EINVAL; 3262 return -EINVAL;
3243 3263
3244 /* 3264 /*
3245 * Lock destination range to serialize with concurrent readpages(). 3265 * Lock destination range to serialize with concurrent readpages() and
3266 * source range to serialize with relocation.
3246 */ 3267 */
3247 lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1); 3268 btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
3248 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1); 3269 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3249 unlock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1); 3270 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3250 3271
3251 return ret; 3272 return ret;
3252} 3273}
@@ -3905,17 +3926,33 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3905 len = ALIGN(src->i_size, bs) - off; 3926 len = ALIGN(src->i_size, bs) - off;
3906 3927
3907 if (destoff > inode->i_size) { 3928 if (destoff > inode->i_size) {
3929 const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);
3930
3908 ret = btrfs_cont_expand(inode, inode->i_size, destoff); 3931 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
3909 if (ret) 3932 if (ret)
3910 return ret; 3933 return ret;
3934 /*
3935 * We may have truncated the last block if the inode's size is
3936 * not sector size aligned, so we need to wait for writeback to
3937 * complete before proceeding further, otherwise we can race
3938 * with cloning and attempt to increment a reference to an
3939 * extent that no longer exists (writeback completed right after
3940 * we found the previous extent covering eof and before we
3941 * attempted to increment its reference count).
3942 */
3943 ret = btrfs_wait_ordered_range(inode, wb_start,
3944 destoff - wb_start);
3945 if (ret)
3946 return ret;
3911 } 3947 }
3912 3948
3913 /* 3949 /*
3914 * Lock destination range to serialize with concurrent readpages(). 3950 * Lock destination range to serialize with concurrent readpages() and
3951 * source range to serialize with relocation.
3915 */ 3952 */
3916 lock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1); 3953 btrfs_double_extent_lock(src, off, inode, destoff, len);
3917 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); 3954 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
3918 unlock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1); 3955 btrfs_double_extent_unlock(src, off, inode, destoff, len);
3919 /* 3956 /*
3920 * Truncate page cache pages so that future reads will see the cloned 3957 * Truncate page cache pages so that future reads will see the cloned
3921 * data immediately and not the previous data. 3958 * data immediately and not the previous data.
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c5586ffd1426..0a3f122dd61f 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1621,6 +1621,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1621 flags | SB_RDONLY, device_name, data); 1621 flags | SB_RDONLY, device_name, data);
1622 if (IS_ERR(mnt_root)) { 1622 if (IS_ERR(mnt_root)) {
1623 root = ERR_CAST(mnt_root); 1623 root = ERR_CAST(mnt_root);
1624 kfree(subvol_name);
1624 goto out; 1625 goto out;
1625 } 1626 }
1626 1627
@@ -1630,12 +1631,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1630 if (error < 0) { 1631 if (error < 0) {
1631 root = ERR_PTR(error); 1632 root = ERR_PTR(error);
1632 mntput(mnt_root); 1633 mntput(mnt_root);
1634 kfree(subvol_name);
1633 goto out; 1635 goto out;
1634 } 1636 }
1635 } 1637 }
1636 } 1638 }
1637 if (IS_ERR(mnt_root)) { 1639 if (IS_ERR(mnt_root)) {
1638 root = ERR_CAST(mnt_root); 1640 root = ERR_CAST(mnt_root);
1641 kfree(subvol_name);
1639 goto out; 1642 goto out;
1640 } 1643 }
1641 1644
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 127fa1535f58..4ec2b660d014 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -850,14 +850,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
850 850
851 btrfs_trans_release_chunk_metadata(trans); 851 btrfs_trans_release_chunk_metadata(trans);
852 852
853 if (lock && should_end_transaction(trans) &&
854 READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
855 spin_lock(&info->trans_lock);
856 if (cur_trans->state == TRANS_STATE_RUNNING)
857 cur_trans->state = TRANS_STATE_BLOCKED;
858 spin_unlock(&info->trans_lock);
859 }
860
861 if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { 853 if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
862 if (throttle) 854 if (throttle)
863 return btrfs_commit_transaction(trans); 855 return btrfs_commit_transaction(trans);
@@ -1879,6 +1871,21 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1879 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1871 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1880} 1872}
1881 1873
1874/*
1875 * Release reserved delayed ref space of all pending block groups of the
1876 * transaction and remove them from the list
1877 */
1878static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
1879{
1880 struct btrfs_fs_info *fs_info = trans->fs_info;
1881 struct btrfs_block_group_cache *block_group, *tmp;
1882
1883 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
1884 btrfs_delayed_refs_rsv_release(fs_info, 1);
1885 list_del_init(&block_group->bg_list);
1886 }
1887}
1888
1882static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) 1889static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1883{ 1890{
1884 /* 1891 /*
@@ -2270,6 +2277,7 @@ scrub_continue:
2270 btrfs_scrub_continue(fs_info); 2277 btrfs_scrub_continue(fs_info);
2271cleanup_transaction: 2278cleanup_transaction:
2272 btrfs_trans_release_metadata(trans); 2279 btrfs_trans_release_metadata(trans);
2280 btrfs_cleanup_pending_block_groups(trans);
2273 btrfs_trans_release_chunk_metadata(trans); 2281 btrfs_trans_release_chunk_metadata(trans);
2274 trans->block_rsv = NULL; 2282 trans->block_rsv = NULL;
2275 btrfs_warn(fs_info, "Skipping commit of aborted transaction."); 2283 btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2576b1a379c9..15561926ab32 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -957,11 +957,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
957 else 957 else
958 fs_devices = alloc_fs_devices(disk_super->fsid, NULL); 958 fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
959 959
960 fs_devices->fsid_change = fsid_change_in_progress;
961
962 if (IS_ERR(fs_devices)) 960 if (IS_ERR(fs_devices))
963 return ERR_CAST(fs_devices); 961 return ERR_CAST(fs_devices);
964 962
963 fs_devices->fsid_change = fsid_change_in_progress;
964
965 mutex_lock(&fs_devices->device_list_mutex); 965 mutex_lock(&fs_devices->device_list_mutex);
966 list_add(&fs_devices->fs_list, &fs_uuids); 966 list_add(&fs_devices->fs_list, &fs_uuids);
967 967
@@ -7825,6 +7825,18 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7825 ret = -EUCLEAN; 7825 ret = -EUCLEAN;
7826 goto out; 7826 goto out;
7827 } 7827 }
7828
7829 /* It's possible this device is a dummy for seed device */
7830 if (dev->disk_total_bytes == 0) {
7831 dev = find_device(fs_info->fs_devices->seed, devid, NULL);
7832 if (!dev) {
7833 btrfs_err(fs_info, "failed to find seed devid %llu",
7834 devid);
7835 ret = -EUCLEAN;
7836 goto out;
7837 }
7838 }
7839
7828 if (physical_offset + physical_len > dev->disk_total_bytes) { 7840 if (physical_offset + physical_len > dev->disk_total_bytes) {
7829 btrfs_err(fs_info, 7841 btrfs_err(fs_info,
7830"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", 7842"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
diff --git a/fs/buffer.c b/fs/buffer.c
index 52d024bfdbc1..48318fb74938 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -200,6 +200,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
200 struct buffer_head *head; 200 struct buffer_head *head;
201 struct page *page; 201 struct page *page;
202 int all_mapped = 1; 202 int all_mapped = 1;
203 static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
203 204
204 index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); 205 index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
205 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); 206 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
@@ -227,15 +228,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
227 * file io on the block device and getblk. It gets dealt with 228 * file io on the block device and getblk. It gets dealt with
228 * elsewhere, don't buffer_error if we had some unmapped buffers 229 * elsewhere, don't buffer_error if we had some unmapped buffers
229 */ 230 */
230 if (all_mapped) { 231 ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
231 printk("__find_get_block_slow() failed. " 232 if (all_mapped && __ratelimit(&last_warned)) {
232 "block=%llu, b_blocknr=%llu\n", 233 printk("__find_get_block_slow() failed. block=%llu, "
233 (unsigned long long)block, 234 "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
234 (unsigned long long)bh->b_blocknr); 235 "device %pg blocksize: %d\n",
235 printk("b_state=0x%08lx, b_size=%zu\n", 236 (unsigned long long)block,
236 bh->b_state, bh->b_size); 237 (unsigned long long)bh->b_blocknr,
237 printk("device %pg blocksize: %d\n", bdev, 238 bh->b_state, bh->b_size, bdev,
238 1 << bd_inode->i_blkbits); 239 1 << bd_inode->i_blkbits);
239 } 240 }
240out_unlock: 241out_unlock:
241 spin_unlock(&bd_mapping->private_lock); 242 spin_unlock(&bd_mapping->private_lock);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 5d0c05e288cc..a47c541f8006 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1494,10 +1494,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
1494 if (err < 0 || off >= i_size_read(inode)) { 1494 if (err < 0 || off >= i_size_read(inode)) {
1495 unlock_page(page); 1495 unlock_page(page);
1496 put_page(page); 1496 put_page(page);
1497 if (err == -ENOMEM) 1497 ret = vmf_error(err);
1498 ret = VM_FAULT_OOM;
1499 else
1500 ret = VM_FAULT_SIGBUS;
1501 goto out_inline; 1498 goto out_inline;
1502 } 1499 }
1503 if (err < PAGE_SIZE) 1500 if (err < PAGE_SIZE)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 94c026bba2c2..bba28a5034ba 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1035,6 +1035,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci)
1035 list_del_init(&ci->i_snap_realm_item); 1035 list_del_init(&ci->i_snap_realm_item);
1036 ci->i_snap_realm_counter++; 1036 ci->i_snap_realm_counter++;
1037 ci->i_snap_realm = NULL; 1037 ci->i_snap_realm = NULL;
1038 if (realm->ino == ci->i_vino.ino)
1039 realm->inode = NULL;
1038 spin_unlock(&realm->inodes_with_caps_lock); 1040 spin_unlock(&realm->inodes_with_caps_lock);
1039 ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc, 1041 ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
1040 realm); 1042 realm);
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 03f4d24db8fe..9455d3aef0c3 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -3,19 +3,6 @@
3 * quota.c - CephFS quota 3 * quota.c - CephFS quota
4 * 4 *
5 * Copyright (C) 2017-2018 SUSE 5 * Copyright (C) 2017-2018 SUSE
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */ 6 */
20 7
21#include <linux/statfs.h> 8#include <linux/statfs.h>
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 041c27ea8de1..f74193da0e09 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
616 capsnap->size); 616 capsnap->size);
617 617
618 spin_lock(&mdsc->snap_flush_lock); 618 spin_lock(&mdsc->snap_flush_lock);
619 list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); 619 if (list_empty(&ci->i_snap_flush_item))
620 list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
620 spin_unlock(&mdsc->snap_flush_lock); 621 spin_unlock(&mdsc->snap_flush_lock);
621 return 1; /* caller may want to ceph_flush_snaps */ 622 return 1; /* caller may want to ceph_flush_snaps */
622} 623}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 4e9a7cc488da..da2cd8e89062 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -530,7 +530,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
530 seq_putc(m, ','); 530 seq_putc(m, ',');
531 pos = m->count; 531 pos = m->count;
532 532
533 ret = ceph_print_client_options(m, fsc->client); 533 ret = ceph_print_client_options(m, fsc->client, false);
534 if (ret) 534 if (ret)
535 return ret; 535 return ret;
536 536
@@ -640,7 +640,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
640 opt = NULL; /* fsc->client now owns this */ 640 opt = NULL; /* fsc->client now owns this */
641 641
642 fsc->client->extra_mon_dispatch = extra_mon_dispatch; 642 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
643 fsc->client->osdc.abort_on_full = true; 643 ceph_set_opt(fsc->client, ABORT_ON_FULL);
644 644
645 if (!fsopt->mds_namespace) { 645 if (!fsopt->mds_namespace) {
646 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 646 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 593fb422d0f3..e92a2fee3c57 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -252,6 +252,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
252 seq_printf(m, ",ACL"); 252 seq_printf(m, ",ACL");
253#endif 253#endif
254 seq_putc(m, '\n'); 254 seq_putc(m, '\n');
255 seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize);
255 seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); 256 seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
256 seq_printf(m, "Servers:"); 257 seq_printf(m, "Servers:");
257 258
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 26776eddd85d..7652551a1fc4 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
150extern const struct export_operations cifs_export_ops; 150extern const struct export_operations cifs_export_ops;
151#endif /* CONFIG_CIFS_NFSD_EXPORT */ 151#endif /* CONFIG_CIFS_NFSD_EXPORT */
152 152
153#define CIFS_VERSION "2.15" 153#define CIFS_VERSION "2.17"
154#endif /* _CIFSFS_H */ 154#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 01ded7038b19..94dbdbe5be34 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1438,6 +1438,7 @@ struct mid_q_entry {
1438 int mid_state; /* wish this were enum but can not pass to wait_event */ 1438 int mid_state; /* wish this were enum but can not pass to wait_event */
1439 unsigned int mid_flags; 1439 unsigned int mid_flags;
1440 __le16 command; /* smb command code */ 1440 __le16 command; /* smb command code */
1441 unsigned int optype; /* operation type */
1441 bool large_buf:1; /* if valid response, is pointer to large buf */ 1442 bool large_buf:1; /* if valid response, is pointer to large buf */
1442 bool multiRsp:1; /* multiple trans2 responses for one request */ 1443 bool multiRsp:1; /* multiple trans2 responses for one request */
1443 bool multiEnd:1; /* both received */ 1444 bool multiEnd:1; /* both received */
@@ -1574,6 +1575,25 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
1574 kfree(param); 1575 kfree(param);
1575} 1576}
1576 1577
1578static inline bool is_interrupt_error(int error)
1579{
1580 switch (error) {
1581 case -EINTR:
1582 case -ERESTARTSYS:
1583 case -ERESTARTNOHAND:
1584 case -ERESTARTNOINTR:
1585 return true;
1586 }
1587 return false;
1588}
1589
1590static inline bool is_retryable_error(int error)
1591{
1592 if (is_interrupt_error(error) || error == -EAGAIN)
1593 return true;
1594 return false;
1595}
1596
1577#define MID_FREE 0 1597#define MID_FREE 0
1578#define MID_REQUEST_ALLOCATED 1 1598#define MID_REQUEST_ALLOCATED 1
1579#define MID_REQUEST_SUBMITTED 2 1599#define MID_REQUEST_SUBMITTED 2
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b1f49c1c543a..bb54ccf8481c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -128,24 +128,31 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
128 int rc; 128 int rc;
129 struct dfs_cache_tgt_list tl; 129 struct dfs_cache_tgt_list tl;
130 struct dfs_cache_tgt_iterator *it = NULL; 130 struct dfs_cache_tgt_iterator *it = NULL;
131 char tree[MAX_TREE_SIZE + 1]; 131 char *tree;
132 const char *tcp_host; 132 const char *tcp_host;
133 size_t tcp_host_len; 133 size_t tcp_host_len;
134 const char *dfs_host; 134 const char *dfs_host;
135 size_t dfs_host_len; 135 size_t dfs_host_len;
136 136
137 tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
138 if (!tree)
139 return -ENOMEM;
140
137 if (tcon->ipc) { 141 if (tcon->ipc) {
138 snprintf(tree, sizeof(tree), "\\\\%s\\IPC$", 142 snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
139 tcon->ses->server->hostname); 143 tcon->ses->server->hostname);
140 return CIFSTCon(0, tcon->ses, tree, tcon, nlsc); 144 rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
145 goto out;
141 } 146 }
142 147
143 if (!tcon->dfs_path) 148 if (!tcon->dfs_path) {
144 return CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc); 149 rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
150 goto out;
151 }
145 152
146 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); 153 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
147 if (rc) 154 if (rc)
148 return rc; 155 goto out;
149 156
150 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host, 157 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
151 &tcp_host_len); 158 &tcp_host_len);
@@ -165,7 +172,7 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
165 continue; 172 continue;
166 } 173 }
167 174
168 snprintf(tree, sizeof(tree), "\\%s", tgt); 175 snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
169 176
170 rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc); 177 rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
171 if (!rc) 178 if (!rc)
@@ -182,6 +189,8 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
182 rc = -ENOENT; 189 rc = -ENOENT;
183 } 190 }
184 dfs_cache_free_tgts(&tl); 191 dfs_cache_free_tgts(&tl);
192out:
193 kfree(tree);
185 return rc; 194 return rc;
186} 195}
187#else 196#else
@@ -1540,18 +1549,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
1540} 1549}
1541 1550
1542static int 1551static int
1543cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1552__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1553 bool malformed)
1544{ 1554{
1545 int length; 1555 int length;
1546 struct cifs_readdata *rdata = mid->callback_data;
1547 1556
1548 length = cifs_discard_remaining_data(server); 1557 length = cifs_discard_remaining_data(server);
1549 dequeue_mid(mid, rdata->result); 1558 dequeue_mid(mid, malformed);
1550 mid->resp_buf = server->smallbuf; 1559 mid->resp_buf = server->smallbuf;
1551 server->smallbuf = NULL; 1560 server->smallbuf = NULL;
1552 return length; 1561 return length;
1553} 1562}
1554 1563
1564static int
1565cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1566{
1567 struct cifs_readdata *rdata = mid->callback_data;
1568
1569 return __cifs_readv_discard(server, mid, rdata->result);
1570}
1571
1555int 1572int
1556cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1573cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1557{ 1574{
@@ -1593,12 +1610,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1593 return -1; 1610 return -1;
1594 } 1611 }
1595 1612
1613 /* set up first two iov for signature check and to get credits */
1614 rdata->iov[0].iov_base = buf;
1615 rdata->iov[0].iov_len = 4;
1616 rdata->iov[1].iov_base = buf + 4;
1617 rdata->iov[1].iov_len = server->total_read - 4;
1618 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
1619 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1620 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
1621 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
1622
1596 /* Was the SMB read successful? */ 1623 /* Was the SMB read successful? */
1597 rdata->result = server->ops->map_error(buf, false); 1624 rdata->result = server->ops->map_error(buf, false);
1598 if (rdata->result != 0) { 1625 if (rdata->result != 0) {
1599 cifs_dbg(FYI, "%s: server returned error %d\n", 1626 cifs_dbg(FYI, "%s: server returned error %d\n",
1600 __func__, rdata->result); 1627 __func__, rdata->result);
1601 return cifs_readv_discard(server, mid); 1628 /* normal error on read response */
1629 return __cifs_readv_discard(server, mid, false);
1602 } 1630 }
1603 1631
1604 /* Is there enough to get to the rest of the READ_RSP header? */ 1632 /* Is there enough to get to the rest of the READ_RSP header? */
@@ -1642,14 +1670,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1642 server->total_read += length; 1670 server->total_read += length;
1643 } 1671 }
1644 1672
1645 /* set up first iov for signature check */
1646 rdata->iov[0].iov_base = buf;
1647 rdata->iov[0].iov_len = 4;
1648 rdata->iov[1].iov_base = buf + 4;
1649 rdata->iov[1].iov_len = server->total_read - 4;
1650 cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
1651 rdata->iov[0].iov_base, server->total_read);
1652
1653 /* how much data is in the response? */ 1673 /* how much data is in the response? */
1654#ifdef CONFIG_CIFS_SMB_DIRECT 1674#ifdef CONFIG_CIFS_SMB_DIRECT
1655 use_rdma_mr = rdata->mr; 1675 use_rdma_mr = rdata->mr;
@@ -2114,7 +2134,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
2114 2134
2115 for (j = 0; j < nr_pages; j++) { 2135 for (j = 0; j < nr_pages; j++) {
2116 unlock_page(wdata2->pages[j]); 2136 unlock_page(wdata2->pages[j]);
2117 if (rc != 0 && rc != -EAGAIN) { 2137 if (rc != 0 && !is_retryable_error(rc)) {
2118 SetPageError(wdata2->pages[j]); 2138 SetPageError(wdata2->pages[j]);
2119 end_page_writeback(wdata2->pages[j]); 2139 end_page_writeback(wdata2->pages[j]);
2120 put_page(wdata2->pages[j]); 2140 put_page(wdata2->pages[j]);
@@ -2123,7 +2143,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
2123 2143
2124 if (rc) { 2144 if (rc) {
2125 kref_put(&wdata2->refcount, cifs_writedata_release); 2145 kref_put(&wdata2->refcount, cifs_writedata_release);
2126 if (rc == -EAGAIN) 2146 if (is_retryable_error(rc))
2127 continue; 2147 continue;
2128 break; 2148 break;
2129 } 2149 }
@@ -2132,7 +2152,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
2132 i += nr_pages; 2152 i += nr_pages;
2133 } while (i < wdata->nr_pages); 2153 } while (i < wdata->nr_pages);
2134 2154
2135 mapping_set_error(inode->i_mapping, rc); 2155 if (rc != 0 && !is_retryable_error(rc))
2156 mapping_set_error(inode->i_mapping, rc);
2136 kref_put(&wdata->refcount, cifs_writedata_release); 2157 kref_put(&wdata->refcount, cifs_writedata_release);
2137} 2158}
2138 2159
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f66529679ca2..8463c940e0e5 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -433,9 +433,10 @@ static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
433 kfree(server->hostname); 433 kfree(server->hostname);
434 434
435 server->hostname = extract_hostname(name); 435 server->hostname = extract_hostname(name);
436 if (!server->hostname) { 436 if (IS_ERR(server->hostname)) {
437 cifs_dbg(FYI, "%s: failed to extract hostname from target: %d\n", 437 cifs_dbg(FYI,
438 __func__, -ENOMEM); 438 "%s: failed to extract hostname from target: %ld\n",
439 __func__, PTR_ERR(server->hostname));
439 } 440 }
440} 441}
441 442
@@ -719,6 +720,21 @@ server_unresponsive(struct TCP_Server_Info *server)
719 return false; 720 return false;
720} 721}
721 722
723static inline bool
724zero_credits(struct TCP_Server_Info *server)
725{
726 int val;
727
728 spin_lock(&server->req_lock);
729 val = server->credits + server->echo_credits + server->oplock_credits;
730 if (server->in_flight == 0 && val == 0) {
731 spin_unlock(&server->req_lock);
732 return true;
733 }
734 spin_unlock(&server->req_lock);
735 return false;
736}
737
722static int 738static int
723cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) 739cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
724{ 740{
@@ -731,6 +747,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
731 for (total_read = 0; msg_data_left(smb_msg); total_read += length) { 747 for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
732 try_to_freeze(); 748 try_to_freeze();
733 749
750 /* reconnect if no credits and no requests in flight */
751 if (zero_credits(server)) {
752 cifs_reconnect(server);
753 return -ECONNABORTED;
754 }
755
734 if (server_unresponsive(server)) 756 if (server_unresponsive(server))
735 return -ECONNABORTED; 757 return -ECONNABORTED;
736 if (cifs_rdma_enabled(server) && server->smbd_conn) 758 if (cifs_rdma_enabled(server) && server->smbd_conn)
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index cd63c4a70875..09b7d0d4f6e4 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -776,6 +776,7 @@ static int get_tgt_list(const struct dfs_cache_entry *ce,
776 it->it_name = kstrndup(t->t_name, strlen(t->t_name), 776 it->it_name = kstrndup(t->t_name, strlen(t->t_name),
777 GFP_KERNEL); 777 GFP_KERNEL);
778 if (!it->it_name) { 778 if (!it->it_name) {
779 kfree(it);
779 rc = -ENOMEM; 780 rc = -ENOMEM;
780 goto err_free_it; 781 goto err_free_it;
781 } 782 }
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e3e3a7550205..659ce1b92c44 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -733,7 +733,8 @@ reopen_success:
733 733
734 if (can_flush) { 734 if (can_flush) {
735 rc = filemap_write_and_wait(inode->i_mapping); 735 rc = filemap_write_and_wait(inode->i_mapping);
736 mapping_set_error(inode->i_mapping, rc); 736 if (!is_interrupt_error(rc))
737 mapping_set_error(inode->i_mapping, rc);
737 738
738 if (tcon->unix_ext) 739 if (tcon->unix_ext)
739 rc = cifs_get_inode_info_unix(&inode, full_path, 740 rc = cifs_get_inode_info_unix(&inode, full_path,
@@ -1132,14 +1133,18 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1132 1133
1133 /* 1134 /*
1134 * Accessing maxBuf is racy with cifs_reconnect - need to store value 1135 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1135 * and check it for zero before using. 1136 * and check it before using.
1136 */ 1137 */
1137 max_buf = tcon->ses->server->maxBuf; 1138 max_buf = tcon->ses->server->maxBuf;
1138 if (!max_buf) { 1139 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1139 free_xid(xid); 1140 free_xid(xid);
1140 return -EINVAL; 1141 return -EINVAL;
1141 } 1142 }
1142 1143
1144 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1145 PAGE_SIZE);
1146 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1147 PAGE_SIZE);
1143 max_num = (max_buf - sizeof(struct smb_hdr)) / 1148 max_num = (max_buf - sizeof(struct smb_hdr)) /
1144 sizeof(LOCKING_ANDX_RANGE); 1149 sizeof(LOCKING_ANDX_RANGE);
1145 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 1150 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -1472,12 +1477,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1472 1477
1473 /* 1478 /*
1474 * Accessing maxBuf is racy with cifs_reconnect - need to store value 1479 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1475 * and check it for zero before using. 1480 * and check it before using.
1476 */ 1481 */
1477 max_buf = tcon->ses->server->maxBuf; 1482 max_buf = tcon->ses->server->maxBuf;
1478 if (!max_buf) 1483 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
1479 return -EINVAL; 1484 return -EINVAL;
1480 1485
1486 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1487 PAGE_SIZE);
1488 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1489 PAGE_SIZE);
1481 max_num = (max_buf - sizeof(struct smb_hdr)) / 1490 max_num = (max_buf - sizeof(struct smb_hdr)) /
1482 sizeof(LOCKING_ANDX_RANGE); 1491 sizeof(LOCKING_ANDX_RANGE);
1483 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 1492 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -2110,6 +2119,7 @@ static int cifs_writepages(struct address_space *mapping,
2110 pgoff_t end, index; 2119 pgoff_t end, index;
2111 struct cifs_writedata *wdata; 2120 struct cifs_writedata *wdata;
2112 int rc = 0; 2121 int rc = 0;
2122 int saved_rc = 0;
2113 unsigned int xid; 2123 unsigned int xid;
2114 2124
2115 /* 2125 /*
@@ -2138,8 +2148,10 @@ retry:
2138 2148
2139 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize, 2149 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2140 &wsize, &credits); 2150 &wsize, &credits);
2141 if (rc) 2151 if (rc != 0) {
2152 done = true;
2142 break; 2153 break;
2154 }
2143 2155
2144 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1; 2156 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2145 2157
@@ -2147,6 +2159,7 @@ retry:
2147 &found_pages); 2159 &found_pages);
2148 if (!wdata) { 2160 if (!wdata) {
2149 rc = -ENOMEM; 2161 rc = -ENOMEM;
2162 done = true;
2150 add_credits_and_wake_if(server, credits, 0); 2163 add_credits_and_wake_if(server, credits, 0);
2151 break; 2164 break;
2152 } 2165 }
@@ -2175,7 +2188,7 @@ retry:
2175 if (rc != 0) { 2188 if (rc != 0) {
2176 add_credits_and_wake_if(server, wdata->credits, 0); 2189 add_credits_and_wake_if(server, wdata->credits, 0);
2177 for (i = 0; i < nr_pages; ++i) { 2190 for (i = 0; i < nr_pages; ++i) {
2178 if (rc == -EAGAIN) 2191 if (is_retryable_error(rc))
2179 redirty_page_for_writepage(wbc, 2192 redirty_page_for_writepage(wbc,
2180 wdata->pages[i]); 2193 wdata->pages[i]);
2181 else 2194 else
@@ -2183,7 +2196,7 @@ retry:
2183 end_page_writeback(wdata->pages[i]); 2196 end_page_writeback(wdata->pages[i]);
2184 put_page(wdata->pages[i]); 2197 put_page(wdata->pages[i]);
2185 } 2198 }
2186 if (rc != -EAGAIN) 2199 if (!is_retryable_error(rc))
2187 mapping_set_error(mapping, rc); 2200 mapping_set_error(mapping, rc);
2188 } 2201 }
2189 kref_put(&wdata->refcount, cifs_writedata_release); 2202 kref_put(&wdata->refcount, cifs_writedata_release);
@@ -2193,6 +2206,15 @@ retry:
2193 continue; 2206 continue;
2194 } 2207 }
2195 2208
2209 /* Return immediately if we received a signal during writing */
2210 if (is_interrupt_error(rc)) {
2211 done = true;
2212 break;
2213 }
2214
2215 if (rc != 0 && saved_rc == 0)
2216 saved_rc = rc;
2217
2196 wbc->nr_to_write -= nr_pages; 2218 wbc->nr_to_write -= nr_pages;
2197 if (wbc->nr_to_write <= 0) 2219 if (wbc->nr_to_write <= 0)
2198 done = true; 2220 done = true;
@@ -2210,6 +2232,9 @@ retry:
2210 goto retry; 2232 goto retry;
2211 } 2233 }
2212 2234
2235 if (saved_rc != 0)
2236 rc = saved_rc;
2237
2213 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2238 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2214 mapping->writeback_index = index; 2239 mapping->writeback_index = index;
2215 2240
@@ -2242,8 +2267,8 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2242 set_page_writeback(page); 2267 set_page_writeback(page);
2243retry_write: 2268retry_write:
2244 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE); 2269 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2245 if (rc == -EAGAIN) { 2270 if (is_retryable_error(rc)) {
2246 if (wbc->sync_mode == WB_SYNC_ALL) 2271 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
2247 goto retry_write; 2272 goto retry_write;
2248 redirty_page_for_writepage(wbc, page); 2273 redirty_page_for_writepage(wbc, page);
2249 } else if (rc != 0) { 2274 } else if (rc != 0) {
@@ -2671,6 +2696,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2671 2696
2672 rc = cifs_write_allocate_pages(wdata->pages, nr_pages); 2697 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2673 if (rc) { 2698 if (rc) {
2699 kvfree(wdata->pages);
2674 kfree(wdata); 2700 kfree(wdata);
2675 add_credits_and_wake_if(server, credits, 0); 2701 add_credits_and_wake_if(server, credits, 0);
2676 break; 2702 break;
@@ -2682,6 +2708,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2682 if (rc) { 2708 if (rc) {
2683 for (i = 0; i < nr_pages; i++) 2709 for (i = 0; i < nr_pages; i++)
2684 put_page(wdata->pages[i]); 2710 put_page(wdata->pages[i]);
2711 kvfree(wdata->pages);
2685 kfree(wdata); 2712 kfree(wdata);
2686 add_credits_and_wake_if(server, credits, 0); 2713 add_credits_and_wake_if(server, credits, 0);
2687 break; 2714 break;
@@ -3361,8 +3388,12 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3361 } 3388 }
3362 3389
3363 rc = cifs_read_allocate_pages(rdata, npages); 3390 rc = cifs_read_allocate_pages(rdata, npages);
3364 if (rc) 3391 if (rc) {
3365 goto error; 3392 kvfree(rdata->pages);
3393 kfree(rdata);
3394 add_credits_and_wake_if(server, credits, 0);
3395 break;
3396 }
3366 3397
3367 rdata->tailsz = PAGE_SIZE; 3398 rdata->tailsz = PAGE_SIZE;
3368 } 3399 }
@@ -3382,7 +3413,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3382 if (!rdata->cfile->invalidHandle || 3413 if (!rdata->cfile->invalidHandle ||
3383 !(rc = cifs_reopen_file(rdata->cfile, true))) 3414 !(rc = cifs_reopen_file(rdata->cfile, true)))
3384 rc = server->ops->async_readv(rdata); 3415 rc = server->ops->async_readv(rdata);
3385error:
3386 if (rc) { 3416 if (rc) {
3387 add_credits_and_wake_if(server, rdata->credits, 0); 3417 add_credits_and_wake_if(server, rdata->credits, 0);
3388 kref_put(&rdata->refcount, 3418 kref_put(&rdata->refcount,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 13fb59aadebc..478003644916 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2257,6 +2257,11 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
2257 * the flush returns error? 2257 * the flush returns error?
2258 */ 2258 */
2259 rc = filemap_write_and_wait(inode->i_mapping); 2259 rc = filemap_write_and_wait(inode->i_mapping);
2260 if (is_interrupt_error(rc)) {
2261 rc = -ERESTARTSYS;
2262 goto out;
2263 }
2264
2260 mapping_set_error(inode->i_mapping, rc); 2265 mapping_set_error(inode->i_mapping, rc);
2261 rc = 0; 2266 rc = 0;
2262 2267
@@ -2400,6 +2405,11 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2400 * the flush returns error? 2405 * the flush returns error?
2401 */ 2406 */
2402 rc = filemap_write_and_wait(inode->i_mapping); 2407 rc = filemap_write_and_wait(inode->i_mapping);
2408 if (is_interrupt_error(rc)) {
2409 rc = -ERESTARTSYS;
2410 goto cifs_setattr_exit;
2411 }
2412
2403 mapping_set_error(inode->i_mapping, rc); 2413 mapping_set_error(inode->i_mapping, rc);
2404 rc = 0; 2414 rc = 0;
2405 2415
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 4ed10dd086e6..b204e84b87fb 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -122,12 +122,14 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
122 122
123 /* 123 /*
124 * Accessing maxBuf is racy with cifs_reconnect - need to store value 124 * Accessing maxBuf is racy with cifs_reconnect - need to store value
125 * and check it for zero before using. 125 * and check it before using.
126 */ 126 */
127 max_buf = tcon->ses->server->maxBuf; 127 max_buf = tcon->ses->server->maxBuf;
128 if (!max_buf) 128 if (max_buf < sizeof(struct smb2_lock_element))
129 return -EINVAL; 129 return -EINVAL;
130 130
131 BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
132 max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
131 max_num = max_buf / sizeof(struct smb2_lock_element); 133 max_num = max_buf / sizeof(struct smb2_lock_element);
132 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); 134 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
133 if (!buf) 135 if (!buf)
@@ -264,6 +266,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
264 return -EINVAL; 266 return -EINVAL;
265 } 267 }
266 268
269 BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
270 max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
267 max_num = max_buf / sizeof(struct smb2_lock_element); 271 max_num = max_buf / sizeof(struct smb2_lock_element);
268 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); 272 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
269 if (!buf) { 273 if (!buf) {
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index f14533da3a93..01a76bccdb8d 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -293,6 +293,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
293 int rc; 293 int rc;
294 struct smb2_file_all_info *smb2_data; 294 struct smb2_file_all_info *smb2_data;
295 __u32 create_options = 0; 295 __u32 create_options = 0;
296 struct cifs_fid fid;
297 bool no_cached_open = tcon->nohandlecache;
296 298
297 *adjust_tz = false; 299 *adjust_tz = false;
298 *symlink = false; 300 *symlink = false;
@@ -301,6 +303,21 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
301 GFP_KERNEL); 303 GFP_KERNEL);
302 if (smb2_data == NULL) 304 if (smb2_data == NULL)
303 return -ENOMEM; 305 return -ENOMEM;
306
307 /* If it is a root and its handle is cached then use it */
308 if (!strlen(full_path) && !no_cached_open) {
309 rc = open_shroot(xid, tcon, &fid);
310 if (rc)
311 goto out;
312 rc = SMB2_query_info(xid, tcon, fid.persistent_fid,
313 fid.volatile_fid, smb2_data);
314 close_shroot(&tcon->crfid);
315 if (rc)
316 goto out;
317 move_smb2_info_to_cifs(data, smb2_data);
318 goto out;
319 }
320
304 if (backup_cred(cifs_sb)) 321 if (backup_cred(cifs_sb))
305 create_options |= CREATE_OPEN_BACKUP_INTENT; 322 create_options |= CREATE_OPEN_BACKUP_INTENT;
306 323
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 6a9c47541c53..7b8b58fb4d3f 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
648 if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK) 648 if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
649 return false; 649 return false;
650 650
651 if (rsp->sync_hdr.CreditRequest) {
652 spin_lock(&server->req_lock);
653 server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
654 spin_unlock(&server->req_lock);
655 wake_up(&server->request_q);
656 }
657
651 if (rsp->StructureSize != 658 if (rsp->StructureSize !=
652 smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { 659 smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
653 if (le16_to_cpu(rsp->StructureSize) == 44) 660 if (le16_to_cpu(rsp->StructureSize) == 44)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index cf7eb891804f..6f96e2292856 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -34,6 +34,7 @@
34#include "cifs_ioctl.h" 34#include "cifs_ioctl.h"
35#include "smbdirect.h" 35#include "smbdirect.h"
36 36
37/* Change credits for different ops and return the total number of credits */
37static int 38static int
38change_conf(struct TCP_Server_Info *server) 39change_conf(struct TCP_Server_Info *server)
39{ 40{
@@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server)
41 server->oplock_credits = server->echo_credits = 0; 42 server->oplock_credits = server->echo_credits = 0;
42 switch (server->credits) { 43 switch (server->credits) {
43 case 0: 44 case 0:
44 return -1; 45 return 0;
45 case 1: 46 case 1:
46 server->echoes = false; 47 server->echoes = false;
47 server->oplocks = false; 48 server->oplocks = false;
48 cifs_dbg(VFS, "disabling echoes and oplocks\n");
49 break; 49 break;
50 case 2: 50 case 2:
51 server->echoes = true; 51 server->echoes = true;
52 server->oplocks = false; 52 server->oplocks = false;
53 server->echo_credits = 1; 53 server->echo_credits = 1;
54 cifs_dbg(FYI, "disabling oplocks\n");
55 break; 54 break;
56 default: 55 default:
57 server->echoes = true; 56 server->echoes = true;
@@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server)
64 server->echo_credits = 1; 63 server->echo_credits = 1;
65 } 64 }
66 server->credits -= server->echo_credits + server->oplock_credits; 65 server->credits -= server->echo_credits + server->oplock_credits;
67 return 0; 66 return server->credits + server->echo_credits + server->oplock_credits;
68} 67}
69 68
70static void 69static void
71smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, 70smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
72 const int optype) 71 const int optype)
73{ 72{
74 int *val, rc = 0; 73 int *val, rc = -1;
74
75 spin_lock(&server->req_lock); 75 spin_lock(&server->req_lock);
76 val = server->ops->get_credits_field(server, optype); 76 val = server->ops->get_credits_field(server, optype);
77 77
@@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
101 } 101 }
102 spin_unlock(&server->req_lock); 102 spin_unlock(&server->req_lock);
103 wake_up(&server->request_q); 103 wake_up(&server->request_q);
104 if (rc) 104
105 cifs_reconnect(server); 105 if (server->tcpStatus == CifsNeedReconnect)
106 return;
107
108 switch (rc) {
109 case -1:
110 /* change_conf hasn't been executed */
111 break;
112 case 0:
113 cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
114 break;
115 case 1:
116 cifs_dbg(VFS, "disabling echoes and oplocks\n");
117 break;
118 case 2:
119 cifs_dbg(FYI, "disabling oplocks\n");
120 break;
121 default:
122 cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
123 }
106} 124}
107 125
108static void 126static void
@@ -136,7 +154,11 @@ smb2_get_credits(struct mid_q_entry *mid)
136{ 154{
137 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf; 155 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
138 156
139 return le16_to_cpu(shdr->CreditRequest); 157 if (mid->mid_state == MID_RESPONSE_RECEIVED
158 || mid->mid_state == MID_RESPONSE_MALFORMED)
159 return le16_to_cpu(shdr->CreditRequest);
160
161 return 0;
140} 162}
141 163
142static int 164static int
@@ -165,14 +187,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
165 187
166 scredits = server->credits; 188 scredits = server->credits;
167 /* can deadlock with reopen */ 189 /* can deadlock with reopen */
168 if (scredits == 1) { 190 if (scredits <= 8) {
169 *num = SMB2_MAX_BUFFER_SIZE; 191 *num = SMB2_MAX_BUFFER_SIZE;
170 *credits = 0; 192 *credits = 0;
171 break; 193 break;
172 } 194 }
173 195
174 /* leave one credit for a possible reopen */ 196 /* leave some credits for reopen and other ops */
175 scredits--; 197 scredits -= 8;
176 *num = min_t(unsigned int, size, 198 *num = min_t(unsigned int, size,
177 scredits * SMB2_MAX_BUFFER_SIZE); 199 scredits * SMB2_MAX_BUFFER_SIZE);
178 200
@@ -844,7 +866,9 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
844 FILE_READ_EA, 866 FILE_READ_EA,
845 FILE_FULL_EA_INFORMATION, 867 FILE_FULL_EA_INFORMATION,
846 SMB2_O_INFO_FILE, 868 SMB2_O_INFO_FILE,
847 SMB2_MAX_EA_BUF, 869 CIFSMaxBufSize -
870 MAX_SMB2_CREATE_RESPONSE_SIZE -
871 MAX_SMB2_CLOSE_RESPONSE_SIZE,
848 &rsp_iov, &buftype, cifs_sb); 872 &rsp_iov, &buftype, cifs_sb);
849 if (rc) { 873 if (rc) {
850 /* 874 /*
@@ -3189,11 +3213,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3189 server->ops->is_status_pending(buf, server, 0)) 3213 server->ops->is_status_pending(buf, server, 0))
3190 return -1; 3214 return -1;
3191 3215
3192 rdata->result = server->ops->map_error(buf, false); 3216 /* set up first two iov to get credits */
3217 rdata->iov[0].iov_base = buf;
3218 rdata->iov[0].iov_len = 4;
3219 rdata->iov[1].iov_base = buf + 4;
3220 rdata->iov[1].iov_len =
3221 min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
3222 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3223 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
3224 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
3225 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
3226
3227 rdata->result = server->ops->map_error(buf, true);
3193 if (rdata->result != 0) { 3228 if (rdata->result != 0) {
3194 cifs_dbg(FYI, "%s: server returned error %d\n", 3229 cifs_dbg(FYI, "%s: server returned error %d\n",
3195 __func__, rdata->result); 3230 __func__, rdata->result);
3196 dequeue_mid(mid, rdata->result); 3231 /* normal error on read response */
3232 dequeue_mid(mid, false);
3197 return 0; 3233 return 0;
3198 } 3234 }
3199 3235
@@ -3266,14 +3302,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
3266 return 0; 3302 return 0;
3267 } 3303 }
3268 3304
3269 /* set up first iov for signature check */
3270 rdata->iov[0].iov_base = buf;
3271 rdata->iov[0].iov_len = 4;
3272 rdata->iov[1].iov_base = buf + 4;
3273 rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
3274 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
3275 rdata->iov[0].iov_base, server->vals->read_rsp_size);
3276
3277 length = rdata->copy_into_pages(server, rdata, &iter); 3305 length = rdata->copy_into_pages(server, rdata, &iter);
3278 3306
3279 kfree(bvec); 3307 kfree(bvec);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index e57f6aa1d638..77b3aaa39b35 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -162,24 +162,31 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
162 int rc; 162 int rc;
163 struct dfs_cache_tgt_list tl; 163 struct dfs_cache_tgt_list tl;
164 struct dfs_cache_tgt_iterator *it = NULL; 164 struct dfs_cache_tgt_iterator *it = NULL;
165 char tree[MAX_TREE_SIZE + 1]; 165 char *tree;
166 const char *tcp_host; 166 const char *tcp_host;
167 size_t tcp_host_len; 167 size_t tcp_host_len;
168 const char *dfs_host; 168 const char *dfs_host;
169 size_t dfs_host_len; 169 size_t dfs_host_len;
170 170
171 tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
172 if (!tree)
173 return -ENOMEM;
174
171 if (tcon->ipc) { 175 if (tcon->ipc) {
172 snprintf(tree, sizeof(tree), "\\\\%s\\IPC$", 176 snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
173 tcon->ses->server->hostname); 177 tcon->ses->server->hostname);
174 return SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); 178 rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
179 goto out;
175 } 180 }
176 181
177 if (!tcon->dfs_path) 182 if (!tcon->dfs_path) {
178 return SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc); 183 rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
184 goto out;
185 }
179 186
180 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); 187 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
181 if (rc) 188 if (rc)
182 return rc; 189 goto out;
183 190
184 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host, 191 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
185 &tcp_host_len); 192 &tcp_host_len);
@@ -199,7 +206,7 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
199 continue; 206 continue;
200 } 207 }
201 208
202 snprintf(tree, sizeof(tree), "\\%s", tgt); 209 snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
203 210
204 rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); 211 rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
205 if (!rc) 212 if (!rc)
@@ -216,6 +223,8 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
216 rc = -ENOENT; 223 rc = -ENOENT;
217 } 224 }
218 dfs_cache_free_tgts(&tl); 225 dfs_cache_free_tgts(&tl);
226out:
227 kfree(tree);
219 return rc; 228 return rc;
220} 229}
221#else 230#else
@@ -2807,6 +2816,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
2807 int resp_buftype = CIFS_NO_BUFFER; 2816 int resp_buftype = CIFS_NO_BUFFER;
2808 struct cifs_ses *ses = tcon->ses; 2817 struct cifs_ses *ses = tcon->ses;
2809 int flags = 0; 2818 int flags = 0;
2819 bool allocated = false;
2810 2820
2811 cifs_dbg(FYI, "Query Info\n"); 2821 cifs_dbg(FYI, "Query Info\n");
2812 2822
@@ -2846,14 +2856,21 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
2846 "Error %d allocating memory for acl\n", 2856 "Error %d allocating memory for acl\n",
2847 rc); 2857 rc);
2848 *dlen = 0; 2858 *dlen = 0;
2859 rc = -ENOMEM;
2849 goto qinf_exit; 2860 goto qinf_exit;
2850 } 2861 }
2862 allocated = true;
2851 } 2863 }
2852 } 2864 }
2853 2865
2854 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), 2866 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
2855 le32_to_cpu(rsp->OutputBufferLength), 2867 le32_to_cpu(rsp->OutputBufferLength),
2856 &rsp_iov, min_len, *data); 2868 &rsp_iov, min_len, *data);
2869 if (rc && allocated) {
2870 kfree(*data);
2871 *data = NULL;
2872 *dlen = 0;
2873 }
2857 2874
2858qinf_exit: 2875qinf_exit:
2859 SMB2_query_info_free(&rqst); 2876 SMB2_query_info_free(&rqst);
@@ -2907,9 +2924,10 @@ smb2_echo_callback(struct mid_q_entry *mid)
2907{ 2924{
2908 struct TCP_Server_Info *server = mid->callback_data; 2925 struct TCP_Server_Info *server = mid->callback_data;
2909 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; 2926 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
2910 unsigned int credits_received = 1; 2927 unsigned int credits_received = 0;
2911 2928
2912 if (mid->mid_state == MID_RESPONSE_RECEIVED) 2929 if (mid->mid_state == MID_RESPONSE_RECEIVED
2930 || mid->mid_state == MID_RESPONSE_MALFORMED)
2913 credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest); 2931 credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
2914 2932
2915 DeleteMidQEntry(mid); 2933 DeleteMidQEntry(mid);
@@ -3166,7 +3184,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
3166 struct TCP_Server_Info *server = tcon->ses->server; 3184 struct TCP_Server_Info *server = tcon->ses->server;
3167 struct smb2_sync_hdr *shdr = 3185 struct smb2_sync_hdr *shdr =
3168 (struct smb2_sync_hdr *)rdata->iov[0].iov_base; 3186 (struct smb2_sync_hdr *)rdata->iov[0].iov_base;
3169 unsigned int credits_received = 1; 3187 unsigned int credits_received = 0;
3170 struct smb_rqst rqst = { .rq_iov = rdata->iov, 3188 struct smb_rqst rqst = { .rq_iov = rdata->iov,
3171 .rq_nvec = 2, 3189 .rq_nvec = 2,
3172 .rq_pages = rdata->pages, 3190 .rq_pages = rdata->pages,
@@ -3205,6 +3223,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
3205 task_io_account_read(rdata->got_bytes); 3223 task_io_account_read(rdata->got_bytes);
3206 cifs_stats_bytes_read(tcon, rdata->got_bytes); 3224 cifs_stats_bytes_read(tcon, rdata->got_bytes);
3207 break; 3225 break;
3226 case MID_RESPONSE_MALFORMED:
3227 credits_received = le16_to_cpu(shdr->CreditRequest);
3228 /* fall through */
3208 default: 3229 default:
3209 if (rdata->result != -ENODATA) 3230 if (rdata->result != -ENODATA)
3210 rdata->result = -EIO; 3231 rdata->result = -EIO;
@@ -3220,8 +3241,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
3220 rdata->mr = NULL; 3241 rdata->mr = NULL;
3221 } 3242 }
3222#endif 3243#endif
3223 if (rdata->result) 3244 if (rdata->result && rdata->result != -ENODATA) {
3224 cifs_stats_fail_inc(tcon, SMB2_READ_HE); 3245 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
3246 trace_smb3_read_err(0 /* xid */,
3247 rdata->cfile->fid.persistent_fid,
3248 tcon->tid, tcon->ses->Suid, rdata->offset,
3249 rdata->bytes, rdata->result);
3250 } else
3251 trace_smb3_read_done(0 /* xid */,
3252 rdata->cfile->fid.persistent_fid,
3253 tcon->tid, tcon->ses->Suid,
3254 rdata->offset, rdata->got_bytes);
3225 3255
3226 queue_work(cifsiod_wq, &rdata->work); 3256 queue_work(cifsiod_wq, &rdata->work);
3227 DeleteMidQEntry(mid); 3257 DeleteMidQEntry(mid);
@@ -3278,12 +3308,14 @@ smb2_async_readv(struct cifs_readdata *rdata)
3278 if (rdata->credits) { 3308 if (rdata->credits) {
3279 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, 3309 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
3280 SMB2_MAX_BUFFER_SIZE)); 3310 SMB2_MAX_BUFFER_SIZE));
3281 shdr->CreditRequest = shdr->CreditCharge; 3311 shdr->CreditRequest =
3312 cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
3282 spin_lock(&server->req_lock); 3313 spin_lock(&server->req_lock);
3283 server->credits += rdata->credits - 3314 server->credits += rdata->credits -
3284 le16_to_cpu(shdr->CreditCharge); 3315 le16_to_cpu(shdr->CreditCharge);
3285 spin_unlock(&server->req_lock); 3316 spin_unlock(&server->req_lock);
3286 wake_up(&server->request_q); 3317 wake_up(&server->request_q);
3318 rdata->credits = le16_to_cpu(shdr->CreditCharge);
3287 flags |= CIFS_HAS_CREDITS; 3319 flags |= CIFS_HAS_CREDITS;
3288 } 3320 }
3289 3321
@@ -3294,13 +3326,11 @@ smb2_async_readv(struct cifs_readdata *rdata)
3294 if (rc) { 3326 if (rc) {
3295 kref_put(&rdata->refcount, cifs_readdata_release); 3327 kref_put(&rdata->refcount, cifs_readdata_release);
3296 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); 3328 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
3297 trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid, 3329 trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
3298 io_parms.tcon->tid, io_parms.tcon->ses->Suid, 3330 io_parms.tcon->tid,
3299 io_parms.offset, io_parms.length); 3331 io_parms.tcon->ses->Suid,
3300 } else 3332 io_parms.offset, io_parms.length, rc);
3301 trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid, 3333 }
3302 io_parms.tcon->tid, io_parms.tcon->ses->Suid,
3303 io_parms.offset, io_parms.length);
3304 3334
3305 cifs_small_buf_release(buf); 3335 cifs_small_buf_release(buf);
3306 return rc; 3336 return rc;
@@ -3344,10 +3374,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
3344 if (rc != -ENODATA) { 3374 if (rc != -ENODATA) {
3345 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 3375 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
3346 cifs_dbg(VFS, "Send error in read = %d\n", rc); 3376 cifs_dbg(VFS, "Send error in read = %d\n", rc);
3377 trace_smb3_read_err(xid, req->PersistentFileId,
3378 io_parms->tcon->tid, ses->Suid,
3379 io_parms->offset, io_parms->length,
3380 rc);
3347 } 3381 }
3348 trace_smb3_read_err(rc, xid, req->PersistentFileId,
3349 io_parms->tcon->tid, ses->Suid,
3350 io_parms->offset, io_parms->length);
3351 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 3382 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3352 return rc == -ENODATA ? 0 : rc; 3383 return rc == -ENODATA ? 0 : rc;
3353 } else 3384 } else
@@ -3388,7 +3419,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
3388 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 3419 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
3389 unsigned int written; 3420 unsigned int written;
3390 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; 3421 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
3391 unsigned int credits_received = 1; 3422 unsigned int credits_received = 0;
3392 3423
3393 switch (mid->mid_state) { 3424 switch (mid->mid_state) {
3394 case MID_RESPONSE_RECEIVED: 3425 case MID_RESPONSE_RECEIVED:
@@ -3416,6 +3447,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
3416 case MID_RETRY_NEEDED: 3447 case MID_RETRY_NEEDED:
3417 wdata->result = -EAGAIN; 3448 wdata->result = -EAGAIN;
3418 break; 3449 break;
3450 case MID_RESPONSE_MALFORMED:
3451 credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
3452 /* fall through */
3419 default: 3453 default:
3420 wdata->result = -EIO; 3454 wdata->result = -EIO;
3421 break; 3455 break;
@@ -3433,8 +3467,17 @@ smb2_writev_callback(struct mid_q_entry *mid)
3433 wdata->mr = NULL; 3467 wdata->mr = NULL;
3434 } 3468 }
3435#endif 3469#endif
3436 if (wdata->result) 3470 if (wdata->result) {
3437 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 3471 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
3472 trace_smb3_write_err(0 /* no xid */,
3473 wdata->cfile->fid.persistent_fid,
3474 tcon->tid, tcon->ses->Suid, wdata->offset,
3475 wdata->bytes, wdata->result);
3476 } else
3477 trace_smb3_write_done(0 /* no xid */,
3478 wdata->cfile->fid.persistent_fid,
3479 tcon->tid, tcon->ses->Suid,
3480 wdata->offset, wdata->bytes);
3438 3481
3439 queue_work(cifsiod_wq, &wdata->work); 3482 queue_work(cifsiod_wq, &wdata->work);
3440 DeleteMidQEntry(mid); 3483 DeleteMidQEntry(mid);
@@ -3555,12 +3598,14 @@ smb2_async_writev(struct cifs_writedata *wdata,
3555 if (wdata->credits) { 3598 if (wdata->credits) {
3556 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, 3599 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
3557 SMB2_MAX_BUFFER_SIZE)); 3600 SMB2_MAX_BUFFER_SIZE));
3558 shdr->CreditRequest = shdr->CreditCharge; 3601 shdr->CreditRequest =
3602 cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
3559 spin_lock(&server->req_lock); 3603 spin_lock(&server->req_lock);
3560 server->credits += wdata->credits - 3604 server->credits += wdata->credits -
3561 le16_to_cpu(shdr->CreditCharge); 3605 le16_to_cpu(shdr->CreditCharge);
3562 spin_unlock(&server->req_lock); 3606 spin_unlock(&server->req_lock);
3563 wake_up(&server->request_q); 3607 wake_up(&server->request_q);
3608 wdata->credits = le16_to_cpu(shdr->CreditCharge);
3564 flags |= CIFS_HAS_CREDITS; 3609 flags |= CIFS_HAS_CREDITS;
3565 } 3610 }
3566 3611
@@ -3574,10 +3619,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
3574 wdata->bytes, rc); 3619 wdata->bytes, rc);
3575 kref_put(&wdata->refcount, release); 3620 kref_put(&wdata->refcount, release);
3576 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 3621 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
3577 } else 3622 }
3578 trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
3579 tcon->tid, tcon->ses->Suid, wdata->offset,
3580 wdata->bytes);
3581 3623
3582async_writev_out: 3624async_writev_out:
3583 cifs_small_buf_release(req); 3625 cifs_small_buf_release(req);
@@ -3803,8 +3845,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
3803 rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) { 3845 rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
3804 srch_inf->endOfSearch = true; 3846 srch_inf->endOfSearch = true;
3805 rc = 0; 3847 rc = 0;
3806 } 3848 } else
3807 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); 3849 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
3808 goto qdir_exit; 3850 goto qdir_exit;
3809 } 3851 }
3810 3852
@@ -4399,8 +4441,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
4399 rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); 4441 rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
4400 cifs_small_buf_release(req); 4442 cifs_small_buf_release(req);
4401 4443
4402 please_key_low = (__u64 *)req->LeaseKey; 4444 please_key_low = (__u64 *)lease_key;
4403 please_key_high = (__u64 *)(req->LeaseKey+8); 4445 please_key_high = (__u64 *)(lease_key+8);
4404 if (rc) { 4446 if (rc) {
4405 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); 4447 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
4406 trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid, 4448 trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 7a2d0a2255e6..538e2299805f 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,9 @@
84 84
85#define NUMBER_OF_SMB2_COMMANDS 0x0013 85#define NUMBER_OF_SMB2_COMMANDS 0x0013
86 86
87/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ 87/* 52 transform hdr + 64 hdr + 88 create rsp */
88#define MAX_SMB2_HDR_SIZE 0x00b0 88#define SMB2_TRANSFORM_HEADER_SIZE 52
89#define MAX_SMB2_HDR_SIZE 204
89 90
90#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) 91#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
91#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd) 92#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
@@ -648,6 +649,13 @@ struct smb2_create_req {
648 __u8 Buffer[0]; 649 __u8 Buffer[0];
649} __packed; 650} __packed;
650 651
652/*
653 * Maximum size of a SMB2_CREATE response is 64 (smb2 header) +
654 * 88 (fixed part of create response) + 520 (path) + 150 (contexts) +
655 * 2 bytes of padding.
656 */
657#define MAX_SMB2_CREATE_RESPONSE_SIZE 824
658
651struct smb2_create_rsp { 659struct smb2_create_rsp {
652 struct smb2_sync_hdr sync_hdr; 660 struct smb2_sync_hdr sync_hdr;
653 __le16 StructureSize; /* Must be 89 */ 661 __le16 StructureSize; /* Must be 89 */
@@ -996,6 +1004,11 @@ struct smb2_close_req {
996 __u64 VolatileFileId; /* opaque endianness */ 1004 __u64 VolatileFileId; /* opaque endianness */
997} __packed; 1005} __packed;
998 1006
1007/*
1008 * Maximum size of a SMB2_CLOSE response is 64 (smb2 header) + 60 (data)
1009 */
1010#define MAX_SMB2_CLOSE_RESPONSE_SIZE 124
1011
999struct smb2_close_rsp { 1012struct smb2_close_rsp {
1000 struct smb2_sync_hdr sync_hdr; 1013 struct smb2_sync_hdr sync_hdr;
1001 __le16 StructureSize; /* 60 */ 1014 __le16 StructureSize; /* 60 */
@@ -1398,8 +1411,6 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
1398 char FileName[0]; /* Name to be assigned to new link */ 1411 char FileName[0]; /* Name to be assigned to new link */
1399} __packed; /* level 11 Set */ 1412} __packed; /* level 11 Set */
1400 1413
1401#define SMB2_MAX_EA_BUF 65536
1402
1403struct smb2_file_full_ea_info { /* encoding of response for level 15 */ 1414struct smb2_file_full_ea_info { /* encoding of response for level 15 */
1404 __le32 next_entry_offset; 1415 __le32 next_entry_offset;
1405 __u8 flags; 1416 __u8 flags;
diff --git a/fs/cifs/trace.c b/fs/cifs/trace.c
index bd4a546feec1..465483787193 100644
--- a/fs/cifs/trace.c
+++ b/fs/cifs/trace.c
@@ -3,16 +3,6 @@
3 * Copyright (C) 2018, Microsoft Corporation. 3 * Copyright (C) 2018, Microsoft Corporation.
4 * 4 *
5 * Author(s): Steve French <stfrench@microsoft.com> 5 * Author(s): Steve French <stfrench@microsoft.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU General Public License for more details.
16 */ 6 */
17#define CREATE_TRACE_POINTS 7#define CREATE_TRACE_POINTS
18#include "trace.h" 8#include "trace.h"
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index fb049809555f..59be48206932 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -3,16 +3,6 @@
3 * Copyright (C) 2018, Microsoft Corporation. 3 * Copyright (C) 2018, Microsoft Corporation.
4 * 4 *
5 * Author(s): Steve French <stfrench@microsoft.com> 5 * Author(s): Steve French <stfrench@microsoft.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU General Public License for more details.
16 */ 6 */
17#undef TRACE_SYSTEM 7#undef TRACE_SYSTEM
18#define TRACE_SYSTEM cifs 8#define TRACE_SYSTEM cifs
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 5be7302853b6..53532bd3f50d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -387,7 +387,7 @@ smbd_done:
387 if (rc < 0 && rc != -EINTR) 387 if (rc < 0 && rc != -EINTR)
388 cifs_dbg(VFS, "Error %d sending data on socket to server\n", 388 cifs_dbg(VFS, "Error %d sending data on socket to server\n",
389 rc); 389 rc);
390 else 390 else if (rc > 0)
391 rc = 0; 391 rc = 0;
392 392
393 return rc; 393 return rc;
@@ -783,8 +783,25 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
783} 783}
784 784
785static void 785static void
786cifs_noop_callback(struct mid_q_entry *mid) 786cifs_compound_callback(struct mid_q_entry *mid)
787{
788 struct TCP_Server_Info *server = mid->server;
789
790 add_credits(server, server->ops->get_credits(mid), mid->optype);
791}
792
793static void
794cifs_compound_last_callback(struct mid_q_entry *mid)
787{ 795{
796 cifs_compound_callback(mid);
797 cifs_wake_up_task(mid);
798}
799
800static void
801cifs_cancelled_callback(struct mid_q_entry *mid)
802{
803 cifs_compound_callback(mid);
804 DeleteMidQEntry(mid);
788} 805}
789 806
790int 807int
@@ -795,7 +812,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
795 int i, j, rc = 0; 812 int i, j, rc = 0;
796 int timeout, optype; 813 int timeout, optype;
797 struct mid_q_entry *midQ[MAX_COMPOUND]; 814 struct mid_q_entry *midQ[MAX_COMPOUND];
798 unsigned int credits = 0; 815 bool cancelled_mid[MAX_COMPOUND] = {false};
816 unsigned int credits[MAX_COMPOUND] = {0};
799 char *buf; 817 char *buf;
800 818
801 timeout = flags & CIFS_TIMEOUT_MASK; 819 timeout = flags & CIFS_TIMEOUT_MASK;
@@ -813,13 +831,31 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
813 return -ENOENT; 831 return -ENOENT;
814 832
815 /* 833 /*
816 * Ensure that we do not send more than 50 overlapping requests 834 * Ensure we obtain 1 credit per request in the compound chain.
817 * to the same server. We may make this configurable later or 835 * It can be optimized further by waiting for all the credits
818 * use ses->maxReq. 836 * at once but this can wait long enough if we don't have enough
837 * credits due to some heavy operations in progress or the server
838 * not granting us much, so a fallback to the current approach is
839 * needed anyway.
819 */ 840 */
820 rc = wait_for_free_request(ses->server, timeout, optype); 841 for (i = 0; i < num_rqst; i++) {
821 if (rc) 842 rc = wait_for_free_request(ses->server, timeout, optype);
822 return rc; 843 if (rc) {
844 /*
845 * We haven't sent an SMB packet to the server yet but
846 * we already obtained credits for i requests in the
847 * compound chain - need to return those credits back
848 * for future use. Note that we need to call add_credits
849 * multiple times to match the way we obtained credits
850 * in the first place and to account for in flight
851 * requests correctly.
852 */
853 for (j = 0; j < i; j++)
854 add_credits(ses->server, 1, optype);
855 return rc;
856 }
857 credits[i] = 1;
858 }
823 859
824 /* 860 /*
825 * Make sure that we sign in the same order that we send on this socket 861 * Make sure that we sign in the same order that we send on this socket
@@ -835,18 +871,24 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
835 for (j = 0; j < i; j++) 871 for (j = 0; j < i; j++)
836 cifs_delete_mid(midQ[j]); 872 cifs_delete_mid(midQ[j]);
837 mutex_unlock(&ses->server->srv_mutex); 873 mutex_unlock(&ses->server->srv_mutex);
874
838 /* Update # of requests on wire to server */ 875 /* Update # of requests on wire to server */
839 add_credits(ses->server, 1, optype); 876 for (j = 0; j < num_rqst; j++)
877 add_credits(ses->server, credits[j], optype);
840 return PTR_ERR(midQ[i]); 878 return PTR_ERR(midQ[i]);
841 } 879 }
842 880
843 midQ[i]->mid_state = MID_REQUEST_SUBMITTED; 881 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
882 midQ[i]->optype = optype;
844 /* 883 /*
845 * We don't invoke the callback compounds unless it is the last 884 * Invoke callback for every part of the compound chain
846 * request. 885 * to calculate credits properly. Wake up this thread only when
886 * the last element is received.
847 */ 887 */
848 if (i < num_rqst - 1) 888 if (i < num_rqst - 1)
849 midQ[i]->callback = cifs_noop_callback; 889 midQ[i]->callback = cifs_compound_callback;
890 else
891 midQ[i]->callback = cifs_compound_last_callback;
850 } 892 }
851 cifs_in_send_inc(ses->server); 893 cifs_in_send_inc(ses->server);
852 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags); 894 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
@@ -860,8 +902,20 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
860 902
861 mutex_unlock(&ses->server->srv_mutex); 903 mutex_unlock(&ses->server->srv_mutex);
862 904
863 if (rc < 0) 905 if (rc < 0) {
906 /* Sending failed for some reason - return credits back */
907 for (i = 0; i < num_rqst; i++)
908 add_credits(ses->server, credits[i], optype);
864 goto out; 909 goto out;
910 }
911
912 /*
913 * At this point the request is passed to the network stack - we assume
914 * that any credits taken from the server structure on the client have
915 * been spent and we can't return them back. Once we receive responses
916 * we will collect credits granted by the server in the mid callbacks
917 * and add those credits to the server structure.
918 */
865 919
866 /* 920 /*
867 * Compounding is never used during session establish. 921 * Compounding is never used during session establish.
@@ -875,36 +929,34 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
875 929
876 for (i = 0; i < num_rqst; i++) { 930 for (i = 0; i < num_rqst; i++) {
877 rc = wait_for_response(ses->server, midQ[i]); 931 rc = wait_for_response(ses->server, midQ[i]);
878 if (rc != 0) { 932 if (rc != 0)
933 break;
934 }
935 if (rc != 0) {
936 for (; i < num_rqst; i++) {
879 cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n", 937 cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
880 midQ[i]->mid, le16_to_cpu(midQ[i]->command)); 938 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
881 send_cancel(ses->server, &rqst[i], midQ[i]); 939 send_cancel(ses->server, &rqst[i], midQ[i]);
882 spin_lock(&GlobalMid_Lock); 940 spin_lock(&GlobalMid_Lock);
883 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { 941 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
884 midQ[i]->mid_flags |= MID_WAIT_CANCELLED; 942 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
885 midQ[i]->callback = DeleteMidQEntry; 943 midQ[i]->callback = cifs_cancelled_callback;
886 spin_unlock(&GlobalMid_Lock); 944 cancelled_mid[i] = true;
887 add_credits(ses->server, 1, optype); 945 credits[i] = 0;
888 return rc;
889 } 946 }
890 spin_unlock(&GlobalMid_Lock); 947 spin_unlock(&GlobalMid_Lock);
891 } 948 }
892 } 949 }
893 950
894 for (i = 0; i < num_rqst; i++)
895 if (midQ[i]->resp_buf)
896 credits += ses->server->ops->get_credits(midQ[i]);
897 if (!credits)
898 credits = 1;
899
900 for (i = 0; i < num_rqst; i++) { 951 for (i = 0; i < num_rqst; i++) {
901 if (rc < 0) 952 if (rc < 0)
902 goto out; 953 goto out;
903 954
904 rc = cifs_sync_mid_result(midQ[i], ses->server); 955 rc = cifs_sync_mid_result(midQ[i], ses->server);
905 if (rc != 0) { 956 if (rc != 0) {
906 add_credits(ses->server, credits, optype); 957 /* mark this mid as cancelled to not free it below */
907 return rc; 958 cancelled_mid[i] = true;
959 goto out;
908 } 960 }
909 961
910 if (!midQ[i]->resp_buf || 962 if (!midQ[i]->resp_buf ||
@@ -951,9 +1003,10 @@ out:
951 * This is prevented above by using a noop callback that will not 1003 * This is prevented above by using a noop callback that will not
952 * wake this thread except for the very last PDU. 1004 * wake this thread except for the very last PDU.
953 */ 1005 */
954 for (i = 0; i < num_rqst; i++) 1006 for (i = 0; i < num_rqst; i++) {
955 cifs_delete_mid(midQ[i]); 1007 if (!cancelled_mid[i])
956 add_credits(ses->server, credits, optype); 1008 cifs_delete_mid(midQ[i]);
1009 }
957 1010
958 return rc; 1011 return rc;
959} 1012}
diff --git a/fs/dcache.c b/fs/dcache.c
index 2593153471cf..aac41adf4743 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -119,6 +119,7 @@ struct dentry_stat_t dentry_stat = {
119 119
120static DEFINE_PER_CPU(long, nr_dentry); 120static DEFINE_PER_CPU(long, nr_dentry);
121static DEFINE_PER_CPU(long, nr_dentry_unused); 121static DEFINE_PER_CPU(long, nr_dentry_unused);
122static DEFINE_PER_CPU(long, nr_dentry_negative);
122 123
123#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 124#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
124 125
@@ -152,11 +153,22 @@ static long get_nr_dentry_unused(void)
152 return sum < 0 ? 0 : sum; 153 return sum < 0 ? 0 : sum;
153} 154}
154 155
156static long get_nr_dentry_negative(void)
157{
158 int i;
159 long sum = 0;
160
161 for_each_possible_cpu(i)
162 sum += per_cpu(nr_dentry_negative, i);
163 return sum < 0 ? 0 : sum;
164}
165
155int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, 166int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
156 size_t *lenp, loff_t *ppos) 167 size_t *lenp, loff_t *ppos)
157{ 168{
158 dentry_stat.nr_dentry = get_nr_dentry(); 169 dentry_stat.nr_dentry = get_nr_dentry();
159 dentry_stat.nr_unused = get_nr_dentry_unused(); 170 dentry_stat.nr_unused = get_nr_dentry_unused();
171 dentry_stat.nr_negative = get_nr_dentry_negative();
160 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 172 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
161} 173}
162#endif 174#endif
@@ -317,6 +329,8 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
317 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); 329 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
318 WRITE_ONCE(dentry->d_flags, flags); 330 WRITE_ONCE(dentry->d_flags, flags);
319 dentry->d_inode = NULL; 331 dentry->d_inode = NULL;
332 if (dentry->d_flags & DCACHE_LRU_LIST)
333 this_cpu_inc(nr_dentry_negative);
320} 334}
321 335
322static void dentry_free(struct dentry *dentry) 336static void dentry_free(struct dentry *dentry)
@@ -371,6 +385,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
371 * The per-cpu "nr_dentry_unused" counters are updated with 385 * The per-cpu "nr_dentry_unused" counters are updated with
372 * the DCACHE_LRU_LIST bit. 386 * the DCACHE_LRU_LIST bit.
373 * 387 *
388 * The per-cpu "nr_dentry_negative" counters are only updated
389 * when deleted from or added to the per-superblock LRU list, not
390 * from/to the shrink list. That is to avoid an unneeded dec/inc
391 * pair when moving from LRU to shrink list in select_collect().
392 *
374 * These helper functions make sure we always follow the 393 * These helper functions make sure we always follow the
375 * rules. d_lock must be held by the caller. 394 * rules. d_lock must be held by the caller.
376 */ 395 */
@@ -380,6 +399,8 @@ static void d_lru_add(struct dentry *dentry)
380 D_FLAG_VERIFY(dentry, 0); 399 D_FLAG_VERIFY(dentry, 0);
381 dentry->d_flags |= DCACHE_LRU_LIST; 400 dentry->d_flags |= DCACHE_LRU_LIST;
382 this_cpu_inc(nr_dentry_unused); 401 this_cpu_inc(nr_dentry_unused);
402 if (d_is_negative(dentry))
403 this_cpu_inc(nr_dentry_negative);
383 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 404 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
384} 405}
385 406
@@ -388,6 +409,8 @@ static void d_lru_del(struct dentry *dentry)
388 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 409 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
389 dentry->d_flags &= ~DCACHE_LRU_LIST; 410 dentry->d_flags &= ~DCACHE_LRU_LIST;
390 this_cpu_dec(nr_dentry_unused); 411 this_cpu_dec(nr_dentry_unused);
412 if (d_is_negative(dentry))
413 this_cpu_dec(nr_dentry_negative);
391 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 414 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
392} 415}
393 416
@@ -418,6 +441,8 @@ static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
418 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 441 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
419 dentry->d_flags &= ~DCACHE_LRU_LIST; 442 dentry->d_flags &= ~DCACHE_LRU_LIST;
420 this_cpu_dec(nr_dentry_unused); 443 this_cpu_dec(nr_dentry_unused);
444 if (d_is_negative(dentry))
445 this_cpu_dec(nr_dentry_negative);
421 list_lru_isolate(lru, &dentry->d_lru); 446 list_lru_isolate(lru, &dentry->d_lru);
422} 447}
423 448
@@ -426,6 +451,8 @@ static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
426{ 451{
427 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 452 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
428 dentry->d_flags |= DCACHE_SHRINK_LIST; 453 dentry->d_flags |= DCACHE_SHRINK_LIST;
454 if (d_is_negative(dentry))
455 this_cpu_dec(nr_dentry_negative);
429 list_lru_isolate_move(lru, &dentry->d_lru, list); 456 list_lru_isolate_move(lru, &dentry->d_lru, list);
430} 457}
431 458
@@ -1188,15 +1215,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1188 */ 1215 */
1189void shrink_dcache_sb(struct super_block *sb) 1216void shrink_dcache_sb(struct super_block *sb)
1190{ 1217{
1191 long freed;
1192
1193 do { 1218 do {
1194 LIST_HEAD(dispose); 1219 LIST_HEAD(dispose);
1195 1220
1196 freed = list_lru_walk(&sb->s_dentry_lru, 1221 list_lru_walk(&sb->s_dentry_lru,
1197 dentry_lru_isolate_shrink, &dispose, 1024); 1222 dentry_lru_isolate_shrink, &dispose, 1024);
1198
1199 this_cpu_sub(nr_dentry_unused, freed);
1200 shrink_dentry_list(&dispose); 1223 shrink_dentry_list(&dispose);
1201 } while (list_lru_count(&sb->s_dentry_lru) > 0); 1224 } while (list_lru_count(&sb->s_dentry_lru) > 0);
1202} 1225}
@@ -1820,6 +1843,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1820 WARN_ON(d_in_lookup(dentry)); 1843 WARN_ON(d_in_lookup(dentry));
1821 1844
1822 spin_lock(&dentry->d_lock); 1845 spin_lock(&dentry->d_lock);
1846 /*
1847 * Decrement negative dentry count if it was in the LRU list.
1848 */
1849 if (dentry->d_flags & DCACHE_LRU_LIST)
1850 this_cpu_dec(nr_dentry_negative);
1823 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); 1851 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1824 raw_write_seqcount_begin(&dentry->d_seq); 1852 raw_write_seqcount_begin(&dentry->d_seq);
1825 __d_set_inode_and_type(dentry, inode, add_flags); 1853 __d_set_inode_and_type(dentry, inode, add_flags);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 13b01351dd1c..29c68c5d44d5 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -324,7 +324,7 @@ static struct dentry *failed_creating(struct dentry *dentry)
324 inode_unlock(d_inode(dentry->d_parent)); 324 inode_unlock(d_inode(dentry->d_parent));
325 dput(dentry); 325 dput(dentry);
326 simple_release_fs(&debugfs_mount, &debugfs_mount_count); 326 simple_release_fs(&debugfs_mount, &debugfs_mount_count);
327 return NULL; 327 return ERR_PTR(-ENOMEM);
328} 328}
329 329
330static struct dentry *end_creating(struct dentry *dentry) 330static struct dentry *end_creating(struct dentry *dentry)
@@ -347,7 +347,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
347 dentry = start_creating(name, parent); 347 dentry = start_creating(name, parent);
348 348
349 if (IS_ERR(dentry)) 349 if (IS_ERR(dentry))
350 return NULL; 350 return dentry;
351 351
352 inode = debugfs_get_inode(dentry->d_sb); 352 inode = debugfs_get_inode(dentry->d_sb);
353 if (unlikely(!inode)) 353 if (unlikely(!inode))
@@ -386,7 +386,8 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
386 * This function will return a pointer to a dentry if it succeeds. This 386 * This function will return a pointer to a dentry if it succeeds. This
387 * pointer must be passed to the debugfs_remove() function when the file is 387 * pointer must be passed to the debugfs_remove() function when the file is
388 * to be removed (no automatic cleanup happens if your module is unloaded, 388 * to be removed (no automatic cleanup happens if your module is unloaded,
389 * you are responsible here.) If an error occurs, %NULL will be returned. 389 * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
390 * returned.
390 * 391 *
391 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 392 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
392 * returned. 393 * returned.
@@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
464 * This function will return a pointer to a dentry if it succeeds. This 465 * This function will return a pointer to a dentry if it succeeds. This
465 * pointer must be passed to the debugfs_remove() function when the file is 466 * pointer must be passed to the debugfs_remove() function when the file is
466 * to be removed (no automatic cleanup happens if your module is unloaded, 467 * to be removed (no automatic cleanup happens if your module is unloaded,
467 * you are responsible here.) If an error occurs, %NULL will be returned. 468 * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
469 * returned.
468 * 470 *
469 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 471 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
470 * returned. 472 * returned.
@@ -495,7 +497,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
495 * This function will return a pointer to a dentry if it succeeds. This 497 * This function will return a pointer to a dentry if it succeeds. This
496 * pointer must be passed to the debugfs_remove() function when the file is 498 * pointer must be passed to the debugfs_remove() function when the file is
497 * to be removed (no automatic cleanup happens if your module is unloaded, 499 * to be removed (no automatic cleanup happens if your module is unloaded,
498 * you are responsible here.) If an error occurs, %NULL will be returned. 500 * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
501 * returned.
499 * 502 *
500 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 503 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
501 * returned. 504 * returned.
@@ -506,7 +509,7 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
506 struct inode *inode; 509 struct inode *inode;
507 510
508 if (IS_ERR(dentry)) 511 if (IS_ERR(dentry))
509 return NULL; 512 return dentry;
510 513
511 inode = debugfs_get_inode(dentry->d_sb); 514 inode = debugfs_get_inode(dentry->d_sb);
512 if (unlikely(!inode)) 515 if (unlikely(!inode))
@@ -545,7 +548,7 @@ struct dentry *debugfs_create_automount(const char *name,
545 struct inode *inode; 548 struct inode *inode;
546 549
547 if (IS_ERR(dentry)) 550 if (IS_ERR(dentry))
548 return NULL; 551 return dentry;
549 552
550 inode = debugfs_get_inode(dentry->d_sb); 553 inode = debugfs_get_inode(dentry->d_sb);
551 if (unlikely(!inode)) 554 if (unlikely(!inode))
@@ -581,8 +584,8 @@ EXPORT_SYMBOL(debugfs_create_automount);
581 * This function will return a pointer to a dentry if it succeeds. This 584 * This function will return a pointer to a dentry if it succeeds. This
582 * pointer must be passed to the debugfs_remove() function when the symbolic 585 * pointer must be passed to the debugfs_remove() function when the symbolic
583 * link is to be removed (no automatic cleanup happens if your module is 586 * link is to be removed (no automatic cleanup happens if your module is
584 * unloaded, you are responsible here.) If an error occurs, %NULL will be 587 * unloaded, you are responsible here.) If an error occurs, %ERR_PTR(-ERROR)
585 * returned. 588 * will be returned.
586 * 589 *
587 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 590 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
588 * returned. 591 * returned.
@@ -594,12 +597,12 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
594 struct inode *inode; 597 struct inode *inode;
595 char *link = kstrdup(target, GFP_KERNEL); 598 char *link = kstrdup(target, GFP_KERNEL);
596 if (!link) 599 if (!link)
597 return NULL; 600 return ERR_PTR(-ENOMEM);
598 601
599 dentry = start_creating(name, parent); 602 dentry = start_creating(name, parent);
600 if (IS_ERR(dentry)) { 603 if (IS_ERR(dentry)) {
601 kfree(link); 604 kfree(link);
602 return NULL; 605 return dentry;
603 } 606 }
604 607
605 inode = debugfs_get_inode(dentry->d_sb); 608 inode = debugfs_get_inode(dentry->d_sb);
@@ -787,6 +790,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
787 struct dentry *dentry = NULL, *trap; 790 struct dentry *dentry = NULL, *trap;
788 struct name_snapshot old_name; 791 struct name_snapshot old_name;
789 792
793 if (IS_ERR(old_dir))
794 return old_dir;
795 if (IS_ERR(new_dir))
796 return new_dir;
797 if (IS_ERR_OR_NULL(old_dentry))
798 return old_dentry;
799
790 trap = lock_rename(new_dir, old_dir); 800 trap = lock_rename(new_dir, old_dir);
791 /* Source or destination directories don't exist? */ 801 /* Source or destination directories don't exist? */
792 if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) 802 if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
@@ -820,7 +830,9 @@ exit:
820 if (dentry && !IS_ERR(dentry)) 830 if (dentry && !IS_ERR(dentry))
821 dput(dentry); 831 dput(dentry);
822 unlock_rename(new_dir, old_dir); 832 unlock_rename(new_dir, old_dir);
823 return NULL; 833 if (IS_ERR(dentry))
834 return dentry;
835 return ERR_PTR(-EINVAL);
824} 836}
825EXPORT_SYMBOL_GPL(debugfs_rename); 837EXPORT_SYMBOL_GPL(debugfs_rename);
826 838
diff --git a/fs/direct-io.c b/fs/direct-io.c
index dbc1a1f080ce..ec2fb6fe6d37 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -679,6 +679,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
679 unsigned long fs_count; /* Number of filesystem-sized blocks */ 679 unsigned long fs_count; /* Number of filesystem-sized blocks */
680 int create; 680 int create;
681 unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor; 681 unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
682 loff_t i_size;
682 683
683 /* 684 /*
684 * If there was a memory error and we've overwritten all the 685 * If there was a memory error and we've overwritten all the
@@ -708,8 +709,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
708 */ 709 */
709 create = dio->op == REQ_OP_WRITE; 710 create = dio->op == REQ_OP_WRITE;
710 if (dio->flags & DIO_SKIP_HOLES) { 711 if (dio->flags & DIO_SKIP_HOLES) {
711 if (fs_startblk <= ((i_size_read(dio->inode) - 1) >> 712 i_size = i_size_read(dio->inode);
712 i_blkbits)) 713 if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
713 create = 0; 714 create = 0;
714 } 715 }
715 716
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 82377017130f..d31b6c72b476 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
21 spin_lock(&sb->s_inode_list_lock); 21 spin_lock(&sb->s_inode_list_lock);
22 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 22 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
23 spin_lock(&inode->i_lock); 23 spin_lock(&inode->i_lock);
24 /*
25 * We must skip inodes in unusual state. We may also skip
26 * inodes without pages but we deliberately won't in case
27 * we need to reschedule to avoid softlockups.
28 */
24 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || 29 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
25 (inode->i_mapping->nrpages == 0)) { 30 (inode->i_mapping->nrpages == 0 && !need_resched())) {
26 spin_unlock(&inode->i_lock); 31 spin_unlock(&inode->i_lock);
27 continue; 32 continue;
28 } 33 }
@@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
30 spin_unlock(&inode->i_lock); 35 spin_unlock(&inode->i_lock);
31 spin_unlock(&sb->s_inode_list_lock); 36 spin_unlock(&sb->s_inode_list_lock);
32 37
38 cond_resched();
33 invalidate_mapping_pages(inode->i_mapping, 0, -1); 39 invalidate_mapping_pages(inode->i_mapping, 0, -1);
34 iput(toput_inode); 40 iput(toput_inode);
35 toput_inode = inode; 41 toput_inode = inode;
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 712f00995390..5508baa11bb6 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
116 goto out; 116 goto out;
117 } 117 }
118 118
119 ret = file_write_and_wait_range(file, start, end);
120 if (ret)
121 return ret;
122
123 if (!journal) { 119 if (!journal) {
124 struct writeback_control wbc = { 120 ret = __generic_file_fsync(file, start, end, datasync);
125 .sync_mode = WB_SYNC_ALL
126 };
127
128 ret = ext4_write_inode(inode, &wbc);
129 if (!ret) 121 if (!ret)
130 ret = ext4_sync_parent(inode); 122 ret = ext4_sync_parent(inode);
131 if (test_opt(inode->i_sb, BARRIER)) 123 if (test_opt(inode->i_sb, BARRIER))
@@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
133 goto out; 125 goto out;
134 } 126 }
135 127
128 ret = file_write_and_wait_range(file, start, end);
129 if (ret)
130 return ret;
136 /* 131 /*
137 * data=writeback,ordered: 132 * data=writeback,ordered:
138 * The caller's filemap_fdatawrite()/wait will sync the data. 133 * The caller's filemap_fdatawrite()/wait will sync the data.
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b40168fcc94a..36855c1f8daf 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -331,11 +331,22 @@ struct inode_switch_wbs_context {
331 struct work_struct work; 331 struct work_struct work;
332}; 332};
333 333
334static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
335{
336 down_write(&bdi->wb_switch_rwsem);
337}
338
339static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
340{
341 up_write(&bdi->wb_switch_rwsem);
342}
343
334static void inode_switch_wbs_work_fn(struct work_struct *work) 344static void inode_switch_wbs_work_fn(struct work_struct *work)
335{ 345{
336 struct inode_switch_wbs_context *isw = 346 struct inode_switch_wbs_context *isw =
337 container_of(work, struct inode_switch_wbs_context, work); 347 container_of(work, struct inode_switch_wbs_context, work);
338 struct inode *inode = isw->inode; 348 struct inode *inode = isw->inode;
349 struct backing_dev_info *bdi = inode_to_bdi(inode);
339 struct address_space *mapping = inode->i_mapping; 350 struct address_space *mapping = inode->i_mapping;
340 struct bdi_writeback *old_wb = inode->i_wb; 351 struct bdi_writeback *old_wb = inode->i_wb;
341 struct bdi_writeback *new_wb = isw->new_wb; 352 struct bdi_writeback *new_wb = isw->new_wb;
@@ -344,6 +355,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
344 bool switched = false; 355 bool switched = false;
345 356
346 /* 357 /*
358 * If @inode switches cgwb membership while sync_inodes_sb() is
359 * being issued, sync_inodes_sb() might miss it. Synchronize.
360 */
361 down_read(&bdi->wb_switch_rwsem);
362
363 /*
347 * By the time control reaches here, RCU grace period has passed 364 * By the time control reaches here, RCU grace period has passed
348 * since I_WB_SWITCH assertion and all wb stat update transactions 365 * since I_WB_SWITCH assertion and all wb stat update transactions
349 * between unlocked_inode_to_wb_begin/end() are guaranteed to be 366 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
@@ -428,6 +445,8 @@ skip_switch:
428 spin_unlock(&new_wb->list_lock); 445 spin_unlock(&new_wb->list_lock);
429 spin_unlock(&old_wb->list_lock); 446 spin_unlock(&old_wb->list_lock);
430 447
448 up_read(&bdi->wb_switch_rwsem);
449
431 if (switched) { 450 if (switched) {
432 wb_wakeup(new_wb); 451 wb_wakeup(new_wb);
433 wb_put(old_wb); 452 wb_put(old_wb);
@@ -468,9 +487,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
468 if (inode->i_state & I_WB_SWITCH) 487 if (inode->i_state & I_WB_SWITCH)
469 return; 488 return;
470 489
490 /*
491 * Avoid starting new switches while sync_inodes_sb() is in
492 * progress. Otherwise, if the down_write protected issue path
493 * blocks heavily, we might end up starting a large number of
494 * switches which will block on the rwsem.
495 */
496 if (!down_read_trylock(&bdi->wb_switch_rwsem))
497 return;
498
471 isw = kzalloc(sizeof(*isw), GFP_ATOMIC); 499 isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
472 if (!isw) 500 if (!isw)
473 return; 501 goto out_unlock;
474 502
475 /* find and pin the new wb */ 503 /* find and pin the new wb */
476 rcu_read_lock(); 504 rcu_read_lock();
@@ -504,12 +532,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
504 * Let's continue after I_WB_SWITCH is guaranteed to be visible. 532 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
505 */ 533 */
506 call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); 534 call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
507 return; 535 goto out_unlock;
508 536
509out_free: 537out_free:
510 if (isw->new_wb) 538 if (isw->new_wb)
511 wb_put(isw->new_wb); 539 wb_put(isw->new_wb);
512 kfree(isw); 540 kfree(isw);
541out_unlock:
542 up_read(&bdi->wb_switch_rwsem);
513} 543}
514 544
515/** 545/**
@@ -887,6 +917,9 @@ fs_initcall(cgroup_writeback_init);
887 917
888#else /* CONFIG_CGROUP_WRITEBACK */ 918#else /* CONFIG_CGROUP_WRITEBACK */
889 919
920static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
921static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
922
890static struct bdi_writeback * 923static struct bdi_writeback *
891locked_inode_to_wb_and_lock_list(struct inode *inode) 924locked_inode_to_wb_and_lock_list(struct inode *inode)
892 __releases(&inode->i_lock) 925 __releases(&inode->i_lock)
@@ -2413,8 +2446,11 @@ void sync_inodes_sb(struct super_block *sb)
2413 return; 2446 return;
2414 WARN_ON(!rwsem_is_locked(&sb->s_umount)); 2447 WARN_ON(!rwsem_is_locked(&sb->s_umount));
2415 2448
2449 /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2450 bdi_down_write_wb_switch_rwsem(bdi);
2416 bdi_split_work_to_wbs(bdi, &work, false); 2451 bdi_split_work_to_wbs(bdi, &work, false);
2417 wb_wait_for_completion(bdi, &done); 2452 wb_wait_for_completion(bdi, &done);
2453 bdi_up_write_wb_switch_rwsem(bdi);
2418 2454
2419 wait_sb_inodes(sb); 2455 wait_sb_inodes(sb);
2420} 2456}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a5e516a40e7a..809c0f2f9942 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1742,7 +1742,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1742 req->in.h.nodeid = outarg->nodeid; 1742 req->in.h.nodeid = outarg->nodeid;
1743 req->in.numargs = 2; 1743 req->in.numargs = 2;
1744 req->in.argpages = 1; 1744 req->in.argpages = 1;
1745 req->page_descs[0].offset = offset;
1746 req->end = fuse_retrieve_end; 1745 req->end = fuse_retrieve_end;
1747 1746
1748 index = outarg->offset >> PAGE_SHIFT; 1747 index = outarg->offset >> PAGE_SHIFT;
@@ -1757,6 +1756,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1757 1756
1758 this_num = min_t(unsigned, num, PAGE_SIZE - offset); 1757 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1759 req->pages[req->num_pages] = page; 1758 req->pages[req->num_pages] = page;
1759 req->page_descs[req->num_pages].offset = offset;
1760 req->page_descs[req->num_pages].length = this_num; 1760 req->page_descs[req->num_pages].length = this_num;
1761 req->num_pages++; 1761 req->num_pages++;
1762 1762
@@ -2077,8 +2077,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
2077 2077
2078 ret = fuse_dev_do_write(fud, &cs, len); 2078 ret = fuse_dev_do_write(fud, &cs, len);
2079 2079
2080 pipe_lock(pipe);
2080 for (idx = 0; idx < nbuf; idx++) 2081 for (idx = 0; idx < nbuf; idx++)
2081 pipe_buf_release(pipe, &bufs[idx]); 2082 pipe_buf_release(pipe, &bufs[idx]);
2083 pipe_unlock(pipe);
2082 2084
2083out: 2085out:
2084 kvfree(bufs); 2086 kvfree(bufs);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ffaffe18352a..a59c16bd90ac 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1782,7 +1782,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1782 spin_unlock(&fc->lock); 1782 spin_unlock(&fc->lock);
1783 1783
1784 dec_wb_stat(&bdi->wb, WB_WRITEBACK); 1784 dec_wb_stat(&bdi->wb, WB_WRITEBACK);
1785 dec_node_page_state(page, NR_WRITEBACK_TEMP); 1785 dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
1786 wb_writeout_inc(&bdi->wb); 1786 wb_writeout_inc(&bdi->wb);
1787 fuse_writepage_free(fc, new_req); 1787 fuse_writepage_free(fc, new_req);
1788 fuse_request_free(new_req); 1788 fuse_request_free(new_req);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 76baaa6be393..c2d4099429be 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -628,6 +628,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
628 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); 628 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
629 fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); 629 fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
630 fc->user_ns = get_user_ns(user_ns); 630 fc->user_ns = get_user_ns(user_ns);
631 fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
631} 632}
632EXPORT_SYMBOL_GPL(fuse_conn_init); 633EXPORT_SYMBOL_GPL(fuse_conn_init);
633 634
@@ -1162,7 +1163,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1162 fc->user_id = d.user_id; 1163 fc->user_id = d.user_id;
1163 fc->group_id = d.group_id; 1164 fc->group_id = d.group_id;
1164 fc->max_read = max_t(unsigned, 4096, d.max_read); 1165 fc->max_read = max_t(unsigned, 4096, d.max_read);
1165 fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
1166 1166
1167 /* Used by get_root_inode() */ 1167 /* Used by get_root_inode() */
1168 sb->s_fs_info = fc; 1168 sb->s_fs_info = fc;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index f15b4c57c4bd..78510ab91835 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -28,7 +28,6 @@
28#include "util.h" 28#include "util.h"
29#include "trans.h" 29#include "trans.h"
30#include "dir.h" 30#include "dir.h"
31#include "lops.h"
32 31
33struct workqueue_struct *gfs2_freeze_wq; 32struct workqueue_struct *gfs2_freeze_wq;
34 33
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 5bfaf381921a..b8830fda51e8 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -733,7 +733,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
733 lh->lh_crc = cpu_to_be32(crc); 733 lh->lh_crc = cpu_to_be32(crc);
734 734
735 gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr); 735 gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
736 gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags); 736 gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, op_flags);
737 log_flush_wait(sdp); 737 log_flush_wait(sdp);
738} 738}
739 739
@@ -810,7 +810,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
810 810
811 gfs2_ordered_write(sdp); 811 gfs2_ordered_write(sdp);
812 lops_before_commit(sdp, tr); 812 lops_before_commit(sdp, tr);
813 gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE); 813 gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, 0);
814 814
815 if (sdp->sd_log_head != sdp->sd_log_flush_head) { 815 if (sdp->sd_log_head != sdp->sd_log_flush_head) {
816 log_flush_wait(sdp); 816 log_flush_wait(sdp);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 94dcab655bc0..2295042bc625 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -17,9 +17,7 @@
17#include <linux/bio.h> 17#include <linux/bio.h>
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/list_sort.h> 19#include <linux/list_sort.h>
20#include <linux/blkdev.h>
21 20
22#include "bmap.h"
23#include "dir.h" 21#include "dir.h"
24#include "gfs2.h" 22#include "gfs2.h"
25#include "incore.h" 23#include "incore.h"
@@ -195,6 +193,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
195/** 193/**
196 * gfs2_end_log_write - end of i/o to the log 194 * gfs2_end_log_write - end of i/o to the log
197 * @bio: The bio 195 * @bio: The bio
196 * @error: Status of i/o request
198 * 197 *
199 * Each bio_vec contains either data from the pagecache or data 198 * Each bio_vec contains either data from the pagecache or data
200 * relating to the log itself. Here we iterate over the bio_vec 199 * relating to the log itself. Here we iterate over the bio_vec
@@ -231,19 +230,20 @@ static void gfs2_end_log_write(struct bio *bio)
231/** 230/**
232 * gfs2_log_submit_bio - Submit any pending log bio 231 * gfs2_log_submit_bio - Submit any pending log bio
233 * @biop: Address of the bio pointer 232 * @biop: Address of the bio pointer
234 * @opf: REQ_OP | op_flags 233 * @op: REQ_OP
234 * @op_flags: req_flag_bits
235 * 235 *
236 * Submit any pending part-built or full bio to the block device. If 236 * Submit any pending part-built or full bio to the block device. If
237 * there is no pending bio, then this is a no-op. 237 * there is no pending bio, then this is a no-op.
238 */ 238 */
239 239
240void gfs2_log_submit_bio(struct bio **biop, int opf) 240void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags)
241{ 241{
242 struct bio *bio = *biop; 242 struct bio *bio = *biop;
243 if (bio) { 243 if (bio) {
244 struct gfs2_sbd *sdp = bio->bi_private; 244 struct gfs2_sbd *sdp = bio->bi_private;
245 atomic_inc(&sdp->sd_log_in_flight); 245 atomic_inc(&sdp->sd_log_in_flight);
246 bio->bi_opf = opf; 246 bio_set_op_attrs(bio, op, op_flags);
247 submit_bio(bio); 247 submit_bio(bio);
248 *biop = NULL; 248 *biop = NULL;
249 } 249 }
@@ -304,7 +304,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
304 nblk >>= sdp->sd_fsb2bb_shift; 304 nblk >>= sdp->sd_fsb2bb_shift;
305 if (blkno == nblk && !flush) 305 if (blkno == nblk && !flush)
306 return bio; 306 return bio;
307 gfs2_log_submit_bio(biop, op); 307 gfs2_log_submit_bio(biop, op, 0);
308 } 308 }
309 309
310 *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); 310 *biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
@@ -375,184 +375,6 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
375 gfs2_log_bmap(sdp)); 375 gfs2_log_bmap(sdp));
376} 376}
377 377
378/**
379 * gfs2_end_log_read - end I/O callback for reads from the log
380 * @bio: The bio
381 *
382 * Simply unlock the pages in the bio. The main thread will wait on them and
383 * process them in order as necessary.
384 */
385
386static void gfs2_end_log_read(struct bio *bio)
387{
388 struct page *page;
389 struct bio_vec *bvec;
390 int i;
391
392 bio_for_each_segment_all(bvec, bio, i) {
393 page = bvec->bv_page;
394 if (bio->bi_status) {
395 int err = blk_status_to_errno(bio->bi_status);
396
397 SetPageError(page);
398 mapping_set_error(page->mapping, err);
399 }
400 unlock_page(page);
401 }
402
403 bio_put(bio);
404}
405
406/**
407 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
408 * @jd: The journal descriptor
409 * @page: The page to look in
410 *
411 * Returns: 1 if found, 0 otherwise.
412 */
413
414static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
415 struct gfs2_log_header_host *head,
416 struct page *page)
417{
418 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
419 struct gfs2_log_header_host uninitialized_var(lh);
420 void *kaddr = kmap_atomic(page);
421 unsigned int offset;
422 bool ret = false;
423
424 for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
425 if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
426 if (lh.lh_sequence > head->lh_sequence)
427 *head = lh;
428 else {
429 ret = true;
430 break;
431 }
432 }
433 }
434 kunmap_atomic(kaddr);
435 return ret;
436}
437
438/**
439 * gfs2_jhead_process_page - Search/cleanup a page
440 * @jd: The journal descriptor
441 * @index: Index of the page to look into
442 * @done: If set, perform only cleanup, else search and set if found.
443 *
444 * Find the page with 'index' in the journal's mapping. Search the page for
445 * the journal head if requested (cleanup == false). Release refs on the
446 * page so the page cache can reclaim it (put_page() twice). We grabbed a
447 * reference on this page two times, first when we did a find_or_create_page()
448 * to obtain the page to add it to the bio and second when we do a
449 * find_get_page() here to get the page to wait on while I/O on it is being
450 * completed.
451 * This function is also used to free up a page we might've grabbed but not
452 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
453 * submitted the I/O, but we already found the jhead so we only need to drop
454 * our references to the page.
455 */
456
457static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
458 struct gfs2_log_header_host *head,
459 bool *done)
460{
461 struct page *page;
462
463 page = find_get_page(jd->jd_inode->i_mapping, index);
464 wait_on_page_locked(page);
465
466 if (PageError(page))
467 *done = true;
468
469 if (!*done)
470 *done = gfs2_jhead_pg_srch(jd, head, page);
471
472 put_page(page); /* Once for find_get_page */
473 put_page(page); /* Once more for find_or_create_page */
474}
475
476/**
477 * gfs2_find_jhead - find the head of a log
478 * @jd: The journal descriptor
479 * @head: The log descriptor for the head of the log is returned here
480 *
481 * Do a search of a journal by reading it in large chunks using bios and find
482 * the valid log entry with the highest sequence number. (i.e. the log head)
483 *
484 * Returns: 0 on success, errno otherwise
485 */
486
487int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
488{
489 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
490 struct address_space *mapping = jd->jd_inode->i_mapping;
491 struct gfs2_journal_extent *je;
492 u32 block, read_idx = 0, submit_idx = 0, index = 0;
493 int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
494 int blocks_per_page = 1 << shift, sz, ret = 0;
495 struct bio *bio = NULL;
496 struct page *page;
497 bool done = false;
498 errseq_t since;
499
500 memset(head, 0, sizeof(*head));
501 if (list_empty(&jd->extent_list))
502 gfs2_map_journal_extents(sdp, jd);
503
504 since = filemap_sample_wb_err(mapping);
505 list_for_each_entry(je, &jd->extent_list, list) {
506 for (block = 0; block < je->blocks; block += blocks_per_page) {
507 index = (je->lblock + block) >> shift;
508
509 page = find_or_create_page(mapping, index, GFP_NOFS);
510 if (!page) {
511 ret = -ENOMEM;
512 done = true;
513 goto out;
514 }
515
516 if (bio) {
517 sz = bio_add_page(bio, page, PAGE_SIZE, 0);
518 if (sz == PAGE_SIZE)
519 goto page_added;
520 submit_idx = index;
521 submit_bio(bio);
522 bio = NULL;
523 }
524
525 bio = gfs2_log_alloc_bio(sdp,
526 je->dblock + (index << shift),
527 gfs2_end_log_read);
528 bio->bi_opf = REQ_OP_READ;
529 sz = bio_add_page(bio, page, PAGE_SIZE, 0);
530 gfs2_assert_warn(sdp, sz == PAGE_SIZE);
531
532page_added:
533 if (submit_idx <= read_idx + BIO_MAX_PAGES) {
534 /* Keep at least one bio in flight */
535 continue;
536 }
537
538 gfs2_jhead_process_page(jd, read_idx++, head, &done);
539 if (done)
540 goto out; /* found */
541 }
542 }
543
544out:
545 if (bio)
546 submit_bio(bio);
547 while (read_idx <= index)
548 gfs2_jhead_process_page(jd, read_idx++, head, &done);
549
550 if (!ret)
551 ret = filemap_check_wb_err(mapping, since);
552
553 return ret;
554}
555
556static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, 378static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
557 u32 ld_length, u32 ld_data1) 379 u32 ld_length, u32 ld_data1)
558{ 380{
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 331160fc568b..711c4d89c063 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -30,10 +30,8 @@ extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp);
30extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, 30extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
31 unsigned size, unsigned offset, u64 blkno); 31 unsigned size, unsigned offset, u64 blkno);
32extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); 32extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
33extern void gfs2_log_submit_bio(struct bio **biop, int opf); 33extern void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags);
34extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); 34extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
35extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
36 struct gfs2_log_header_host *head);
37 35
38static inline unsigned int buf_limit(struct gfs2_sbd *sdp) 36static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
39{ 37{
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1179763f6370..b041cb8ae383 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -41,7 +41,6 @@
41#include "dir.h" 41#include "dir.h"
42#include "meta_io.h" 42#include "meta_io.h"
43#include "trace_gfs2.h" 43#include "trace_gfs2.h"
44#include "lops.h"
45 44
46#define DO 0 45#define DO 0
47#define UNDO 1 46#define UNDO 1
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 7389e445a7a7..2dac43065382 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -182,6 +182,129 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
182} 182}
183 183
184/** 184/**
185 * find_good_lh - find a good log header
186 * @jd: the journal
187 * @blk: the segment to start searching from
188 * @lh: the log header to fill in
189 * @forward: if true search forward in the log, else search backward
190 *
191 * Call get_log_header() to get a log header for a segment, but if the
192 * segment is bad, either scan forward or backward until we find a good one.
193 *
194 * Returns: errno
195 */
196
197static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
198 struct gfs2_log_header_host *head)
199{
200 unsigned int orig_blk = *blk;
201 int error;
202
203 for (;;) {
204 error = get_log_header(jd, *blk, head);
205 if (error <= 0)
206 return error;
207
208 if (++*blk == jd->jd_blocks)
209 *blk = 0;
210
211 if (*blk == orig_blk) {
212 gfs2_consist_inode(GFS2_I(jd->jd_inode));
213 return -EIO;
214 }
215 }
216}
217
218/**
219 * jhead_scan - make sure we've found the head of the log
220 * @jd: the journal
221 * @head: this is filled in with the log descriptor of the head
222 *
223 * At this point, seg and lh should be either the head of the log or just
224 * before. Scan forward until we find the head.
225 *
226 * Returns: errno
227 */
228
229static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
230{
231 unsigned int blk = head->lh_blkno;
232 struct gfs2_log_header_host lh;
233 int error;
234
235 for (;;) {
236 if (++blk == jd->jd_blocks)
237 blk = 0;
238
239 error = get_log_header(jd, blk, &lh);
240 if (error < 0)
241 return error;
242 if (error == 1)
243 continue;
244
245 if (lh.lh_sequence == head->lh_sequence) {
246 gfs2_consist_inode(GFS2_I(jd->jd_inode));
247 return -EIO;
248 }
249 if (lh.lh_sequence < head->lh_sequence)
250 break;
251
252 *head = lh;
253 }
254
255 return 0;
256}
257
258/**
259 * gfs2_find_jhead - find the head of a log
260 * @jd: the journal
261 * @head: the log descriptor for the head of the log is returned here
262 *
263 * Do a binary search of a journal and find the valid log entry with the
264 * highest sequence number. (i.e. the log head)
265 *
266 * Returns: errno
267 */
268
269int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
270{
271 struct gfs2_log_header_host lh_1, lh_m;
272 u32 blk_1, blk_2, blk_m;
273 int error;
274
275 blk_1 = 0;
276 blk_2 = jd->jd_blocks - 1;
277
278 for (;;) {
279 blk_m = (blk_1 + blk_2) / 2;
280
281 error = find_good_lh(jd, &blk_1, &lh_1);
282 if (error)
283 return error;
284
285 error = find_good_lh(jd, &blk_m, &lh_m);
286 if (error)
287 return error;
288
289 if (blk_1 == blk_m || blk_m == blk_2)
290 break;
291
292 if (lh_1.lh_sequence <= lh_m.lh_sequence)
293 blk_1 = blk_m;
294 else
295 blk_2 = blk_m;
296 }
297
298 error = jhead_scan(jd, &lh_1);
299 if (error)
300 return error;
301
302 *head = lh_1;
303
304 return error;
305}
306
307/**
185 * foreach_descriptor - go through the active part of the log 308 * foreach_descriptor - go through the active part of the log
186 * @jd: the journal 309 * @jd: the journal
187 * @start: the first log header in the active region 310 * @start: the first log header in the active region
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 99575ab81202..11d81248be85 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -27,6 +27,8 @@ extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
27extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where); 27extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
28extern void gfs2_revoke_clean(struct gfs2_jdesc *jd); 28extern void gfs2_revoke_clean(struct gfs2_jdesc *jd);
29 29
30extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
31 struct gfs2_log_header_host *head);
30extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); 32extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
31extern void gfs2_recover_func(struct work_struct *work); 33extern void gfs2_recover_func(struct work_struct *work);
32extern int __get_log_header(struct gfs2_sbd *sdp, 34extern int __get_log_header(struct gfs2_sbd *sdp,
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 831d7cb5a49c..17a8d3b43990 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1780 goto next_iter; 1780 goto next_iter;
1781 } 1781 }
1782 if (ret == -E2BIG) { 1782 if (ret == -E2BIG) {
1783 n += rbm->bii - initial_bii;
1784 rbm->bii = 0; 1783 rbm->bii = 0;
1785 rbm->offset = 0; 1784 rbm->offset = 0;
1785 n += (rbm->bii - initial_bii);
1786 goto res_covered_end_of_rgrp; 1786 goto res_covered_end_of_rgrp;
1787 } 1787 }
1788 return ret; 1788 return ret;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index d4b11c903971..ca71163ff7cf 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -45,7 +45,6 @@
45#include "util.h" 45#include "util.h"
46#include "sys.h" 46#include "sys.h"
47#include "xattr.h" 47#include "xattr.h"
48#include "lops.h"
49 48
50#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x) 49#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
51 50
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a2fcea5f8225..32920a10100e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -383,16 +383,17 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
383 * truncation is indicated by end of range being LLONG_MAX 383 * truncation is indicated by end of range being LLONG_MAX
384 * In this case, we first scan the range and release found pages. 384 * In this case, we first scan the range and release found pages.
385 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv 385 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
386 * maps and global counts. 386 * maps and global counts. Page faults can not race with truncation
387 * in this routine. hugetlb_no_page() prevents page faults in the
388 * truncated range. It checks i_size before allocation, and again after
389 * with the page table lock for the page held. The same lock must be
390 * acquired to unmap a page.
387 * hole punch is indicated if end is not LLONG_MAX 391 * hole punch is indicated if end is not LLONG_MAX
388 * In the hole punch case we scan the range and release found pages. 392 * In the hole punch case we scan the range and release found pages.
389 * Only when releasing a page is the associated region/reserv map 393 * Only when releasing a page is the associated region/reserv map
390 * deleted. The region/reserv map for ranges without associated 394 * deleted. The region/reserv map for ranges without associated
391 * pages are not modified. 395 * pages are not modified. Page faults can race with hole punch.
392 * 396 * This is indicated if we find a mapped page.
393 * Callers of this routine must hold the i_mmap_rwsem in write mode to prevent
394 * races with page faults.
395 *
396 * Note: If the passed end of range value is beyond the end of file, but 397 * Note: If the passed end of range value is beyond the end of file, but
397 * not LLONG_MAX this routine still performs a hole punch operation. 398 * not LLONG_MAX this routine still performs a hole punch operation.
398 */ 399 */
@@ -422,14 +423,32 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
422 423
423 for (i = 0; i < pagevec_count(&pvec); ++i) { 424 for (i = 0; i < pagevec_count(&pvec); ++i) {
424 struct page *page = pvec.pages[i]; 425 struct page *page = pvec.pages[i];
426 u32 hash;
425 427
426 index = page->index; 428 index = page->index;
429 hash = hugetlb_fault_mutex_hash(h, current->mm,
430 &pseudo_vma,
431 mapping, index, 0);
432 mutex_lock(&hugetlb_fault_mutex_table[hash]);
433
427 /* 434 /*
428 * A mapped page is impossible as callers should unmap 435 * If page is mapped, it was faulted in after being
429 * all references before calling. And, i_mmap_rwsem 436 * unmapped in caller. Unmap (again) now after taking
430 * prevents the creation of additional mappings. 437 * the fault mutex. The mutex will prevent faults
438 * until we finish removing the page.
439 *
440 * This race can only happen in the hole punch case.
441 * Getting here in a truncate operation is a bug.
431 */ 442 */
432 VM_BUG_ON(page_mapped(page)); 443 if (unlikely(page_mapped(page))) {
444 BUG_ON(truncate_op);
445
446 i_mmap_lock_write(mapping);
447 hugetlb_vmdelete_list(&mapping->i_mmap,
448 index * pages_per_huge_page(h),
449 (index + 1) * pages_per_huge_page(h));
450 i_mmap_unlock_write(mapping);
451 }
433 452
434 lock_page(page); 453 lock_page(page);
435 /* 454 /*
@@ -451,6 +470,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
451 } 470 }
452 471
453 unlock_page(page); 472 unlock_page(page);
473 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
454 } 474 }
455 huge_pagevec_release(&pvec); 475 huge_pagevec_release(&pvec);
456 cond_resched(); 476 cond_resched();
@@ -462,20 +482,9 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
462 482
463static void hugetlbfs_evict_inode(struct inode *inode) 483static void hugetlbfs_evict_inode(struct inode *inode)
464{ 484{
465 struct address_space *mapping = inode->i_mapping;
466 struct resv_map *resv_map; 485 struct resv_map *resv_map;
467 486
468 /*
469 * The vfs layer guarantees that there are no other users of this
470 * inode. Therefore, it would be safe to call remove_inode_hugepages
471 * without holding i_mmap_rwsem. We acquire and hold here to be
472 * consistent with other callers. Since there will be no contention
473 * on the semaphore, overhead is negligible.
474 */
475 i_mmap_lock_write(mapping);
476 remove_inode_hugepages(inode, 0, LLONG_MAX); 487 remove_inode_hugepages(inode, 0, LLONG_MAX);
477 i_mmap_unlock_write(mapping);
478
479 resv_map = (struct resv_map *)inode->i_mapping->private_data; 488 resv_map = (struct resv_map *)inode->i_mapping->private_data;
480 /* root inode doesn't have the resv_map, so we should check it */ 489 /* root inode doesn't have the resv_map, so we should check it */
481 if (resv_map) 490 if (resv_map)
@@ -496,8 +505,8 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
496 i_mmap_lock_write(mapping); 505 i_mmap_lock_write(mapping);
497 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) 506 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
498 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); 507 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
499 remove_inode_hugepages(inode, offset, LLONG_MAX);
500 i_mmap_unlock_write(mapping); 508 i_mmap_unlock_write(mapping);
509 remove_inode_hugepages(inode, offset, LLONG_MAX);
501 return 0; 510 return 0;
502} 511}
503 512
@@ -531,8 +540,8 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
531 hugetlb_vmdelete_list(&mapping->i_mmap, 540 hugetlb_vmdelete_list(&mapping->i_mmap,
532 hole_start >> PAGE_SHIFT, 541 hole_start >> PAGE_SHIFT,
533 hole_end >> PAGE_SHIFT); 542 hole_end >> PAGE_SHIFT);
534 remove_inode_hugepages(inode, hole_start, hole_end);
535 i_mmap_unlock_write(mapping); 543 i_mmap_unlock_write(mapping);
544 remove_inode_hugepages(inode, hole_start, hole_end);
536 inode_unlock(inode); 545 inode_unlock(inode);
537 } 546 }
538 547
@@ -615,11 +624,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
615 /* addr is the offset within the file (zero based) */ 624 /* addr is the offset within the file (zero based) */
616 addr = index * hpage_size; 625 addr = index * hpage_size;
617 626
618 /* 627 /* mutex taken here, fault path and hole punch */
619 * fault mutex taken here, protects against fault path
620 * and hole punch. inode_lock previously taken protects
621 * against truncation.
622 */
623 hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping, 628 hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
624 index, addr); 629 index, addr);
625 mutex_lock(&hugetlb_fault_mutex_table[hash]); 630 mutex_lock(&hugetlb_fault_mutex_table[hash]);
diff --git a/fs/inode.c b/fs/inode.c
index 0cd47fe0dbe5..73432e64f874 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
730 return LRU_REMOVED; 730 return LRU_REMOVED;
731 } 731 }
732 732
733 /* 733 /* recently referenced inodes get one more pass */
734 * Recently referenced inodes and inodes with many attached pages 734 if (inode->i_state & I_REFERENCED) {
735 * get one more pass.
736 */
737 if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
738 inode->i_state &= ~I_REFERENCED; 735 inode->i_state &= ~I_REFERENCED;
739 spin_unlock(&inode->i_lock); 736 spin_unlock(&inode->i_lock);
740 return LRU_ROTATE; 737 return LRU_ROTATE;
diff --git a/fs/iomap.c b/fs/iomap.c
index a3088fae567b..897c60215dd1 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page)
116 atomic_set(&iop->read_count, 0); 116 atomic_set(&iop->read_count, 0);
117 atomic_set(&iop->write_count, 0); 117 atomic_set(&iop->write_count, 0);
118 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); 118 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
119
120 /*
121 * migrate_page_move_mapping() assumes that pages with private data have
122 * their count elevated by 1.
123 */
124 get_page(page);
119 set_page_private(page, (unsigned long)iop); 125 set_page_private(page, (unsigned long)iop);
120 SetPagePrivate(page); 126 SetPagePrivate(page);
121 return iop; 127 return iop;
@@ -132,6 +138,7 @@ iomap_page_release(struct page *page)
132 WARN_ON_ONCE(atomic_read(&iop->write_count)); 138 WARN_ON_ONCE(atomic_read(&iop->write_count));
133 ClearPagePrivate(page); 139 ClearPagePrivate(page);
134 set_page_private(page, 0); 140 set_page_private(page, 0);
141 put_page(page);
135 kfree(iop); 142 kfree(iop);
136} 143}
137 144
@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
569 576
570 if (page_has_private(page)) { 577 if (page_has_private(page)) {
571 ClearPagePrivate(page); 578 ClearPagePrivate(page);
579 get_page(newpage);
572 set_page_private(newpage, page_private(page)); 580 set_page_private(newpage, page_private(page));
573 set_page_private(page, 0); 581 set_page_private(page, 0);
582 put_page(page);
574 SetPagePrivate(newpage); 583 SetPagePrivate(newpage);
575 } 584 }
576 585
@@ -1804,6 +1813,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1804 loff_t pos = iocb->ki_pos, start = pos; 1813 loff_t pos = iocb->ki_pos, start = pos;
1805 loff_t end = iocb->ki_pos + count - 1, ret = 0; 1814 loff_t end = iocb->ki_pos + count - 1, ret = 0;
1806 unsigned int flags = IOMAP_DIRECT; 1815 unsigned int flags = IOMAP_DIRECT;
1816 bool wait_for_completion = is_sync_kiocb(iocb);
1807 struct blk_plug plug; 1817 struct blk_plug plug;
1808 struct iomap_dio *dio; 1818 struct iomap_dio *dio;
1809 1819
@@ -1823,7 +1833,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1823 dio->end_io = end_io; 1833 dio->end_io = end_io;
1824 dio->error = 0; 1834 dio->error = 0;
1825 dio->flags = 0; 1835 dio->flags = 0;
1826 dio->wait_for_completion = is_sync_kiocb(iocb);
1827 1836
1828 dio->submit.iter = iter; 1837 dio->submit.iter = iter;
1829 dio->submit.waiter = current; 1838 dio->submit.waiter = current;
@@ -1878,7 +1887,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1878 dio_warn_stale_pagecache(iocb->ki_filp); 1887 dio_warn_stale_pagecache(iocb->ki_filp);
1879 ret = 0; 1888 ret = 0;
1880 1889
1881 if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion && 1890 if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
1882 !inode->i_sb->s_dio_done_wq) { 1891 !inode->i_sb->s_dio_done_wq) {
1883 ret = sb_init_dio_done_wq(inode->i_sb); 1892 ret = sb_init_dio_done_wq(inode->i_sb);
1884 if (ret < 0) 1893 if (ret < 0)
@@ -1894,7 +1903,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1894 if (ret <= 0) { 1903 if (ret <= 0) {
1895 /* magic error code to fall back to buffered I/O */ 1904 /* magic error code to fall back to buffered I/O */
1896 if (ret == -ENOTBLK) { 1905 if (ret == -ENOTBLK) {
1897 dio->wait_for_completion = true; 1906 wait_for_completion = true;
1898 ret = 0; 1907 ret = 0;
1899 } 1908 }
1900 break; 1909 break;
@@ -1916,8 +1925,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1916 if (dio->flags & IOMAP_DIO_WRITE_FUA) 1925 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1917 dio->flags &= ~IOMAP_DIO_NEED_SYNC; 1926 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1918 1927
1928 /*
1929 * We are about to drop our additional submission reference, which
1930 * might be the last reference to the dio. There are three three
1931 * different ways we can progress here:
1932 *
1933 * (a) If this is the last reference we will always complete and free
1934 * the dio ourselves.
1935 * (b) If this is not the last reference, and we serve an asynchronous
1936 * iocb, we must never touch the dio after the decrement, the
1937 * I/O completion handler will complete and free it.
1938 * (c) If this is not the last reference, but we serve a synchronous
1939 * iocb, the I/O completion handler will wake us up on the drop
1940 * of the final reference, and we will complete and free it here
1941 * after we got woken by the I/O completion handler.
1942 */
1943 dio->wait_for_completion = wait_for_completion;
1919 if (!atomic_dec_and_test(&dio->ref)) { 1944 if (!atomic_dec_and_test(&dio->ref)) {
1920 if (!dio->wait_for_completion) 1945 if (!wait_for_completion)
1921 return -EIOCBQUEUED; 1946 return -EIOCBQUEUED;
1922 1947
1923 for (;;) { 1948 for (;;) {
@@ -1934,9 +1959,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1934 __set_current_state(TASK_RUNNING); 1959 __set_current_state(TASK_RUNNING);
1935 } 1960 }
1936 1961
1937 ret = iomap_dio_complete(dio); 1962 return iomap_dio_complete(dio);
1938
1939 return ret;
1940 1963
1941out_free_dio: 1964out_free_dio:
1942 kfree(dio); 1965 kfree(dio);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 46d691ba04bc..45b2322e092d 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -133,15 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
133 struct file *file_out, loff_t pos_out, 133 struct file *file_out, loff_t pos_out,
134 size_t count, unsigned int flags) 134 size_t count, unsigned int flags)
135{ 135{
136 ssize_t ret;
137
138 if (file_inode(file_in) == file_inode(file_out)) 136 if (file_inode(file_in) == file_inode(file_out))
139 return -EINVAL; 137 return -EINVAL;
140retry: 138 return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
141 ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
142 if (ret == -EAGAIN)
143 goto retry;
144 return ret;
145} 139}
146 140
147static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) 141static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 3f23b6840547..bf34ddaa2ad7 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -44,6 +44,7 @@
44#include <linux/keyctl.h> 44#include <linux/keyctl.h>
45#include <linux/key-type.h> 45#include <linux/key-type.h>
46#include <keys/user-type.h> 46#include <keys/user-type.h>
47#include <keys/request_key_auth-type.h>
47#include <linux/module.h> 48#include <linux/module.h>
48 49
49#include "internal.h" 50#include "internal.h"
@@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy;
59struct idmap_legacy_upcalldata { 60struct idmap_legacy_upcalldata {
60 struct rpc_pipe_msg pipe_msg; 61 struct rpc_pipe_msg pipe_msg;
61 struct idmap_msg idmap_msg; 62 struct idmap_msg idmap_msg;
62 struct key_construction *key_cons; 63 struct key *authkey;
63 struct idmap *idmap; 64 struct idmap *idmap;
64}; 65};
65 66
@@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = {
384 { Opt_find_err, NULL } 385 { Opt_find_err, NULL }
385}; 386};
386 387
387static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); 388static int nfs_idmap_legacy_upcall(struct key *, void *);
388static ssize_t idmap_pipe_downcall(struct file *, const char __user *, 389static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
389 size_t); 390 size_t);
390static void idmap_release_pipe(struct inode *); 391static void idmap_release_pipe(struct inode *);
@@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
549static void 550static void
550nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret) 551nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
551{ 552{
552 struct key_construction *cons = idmap->idmap_upcall_data->key_cons; 553 struct key *authkey = idmap->idmap_upcall_data->authkey;
553 554
554 kfree(idmap->idmap_upcall_data); 555 kfree(idmap->idmap_upcall_data);
555 idmap->idmap_upcall_data = NULL; 556 idmap->idmap_upcall_data = NULL;
556 complete_request_key(cons, ret); 557 complete_request_key(authkey, ret);
558 key_put(authkey);
557} 559}
558 560
559static void 561static void
@@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
563 nfs_idmap_complete_pipe_upcall_locked(idmap, ret); 565 nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
564} 566}
565 567
566static int nfs_idmap_legacy_upcall(struct key_construction *cons, 568static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
567 const char *op,
568 void *aux)
569{ 569{
570 struct idmap_legacy_upcalldata *data; 570 struct idmap_legacy_upcalldata *data;
571 struct request_key_auth *rka = get_request_key_auth(authkey);
571 struct rpc_pipe_msg *msg; 572 struct rpc_pipe_msg *msg;
572 struct idmap_msg *im; 573 struct idmap_msg *im;
573 struct idmap *idmap = (struct idmap *)aux; 574 struct idmap *idmap = (struct idmap *)aux;
574 struct key *key = cons->key; 575 struct key *key = rka->target_key;
575 int ret = -ENOKEY; 576 int ret = -ENOKEY;
576 577
577 if (!aux) 578 if (!aux)
@@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
586 msg = &data->pipe_msg; 587 msg = &data->pipe_msg;
587 im = &data->idmap_msg; 588 im = &data->idmap_msg;
588 data->idmap = idmap; 589 data->idmap = idmap;
589 data->key_cons = cons; 590 data->authkey = key_get(authkey);
590 591
591 ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); 592 ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
592 if (ret < 0) 593 if (ret < 0)
@@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
604out2: 605out2:
605 kfree(data); 606 kfree(data);
606out1: 607out1:
607 complete_request_key(cons, ret); 608 complete_request_key(authkey, ret);
608 return ret; 609 return ret;
609} 610}
610 611
@@ -651,9 +652,10 @@ out:
651static ssize_t 652static ssize_t
652idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) 653idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
653{ 654{
655 struct request_key_auth *rka;
654 struct rpc_inode *rpci = RPC_I(file_inode(filp)); 656 struct rpc_inode *rpci = RPC_I(file_inode(filp));
655 struct idmap *idmap = (struct idmap *)rpci->private; 657 struct idmap *idmap = (struct idmap *)rpci->private;
656 struct key_construction *cons; 658 struct key *authkey;
657 struct idmap_msg im; 659 struct idmap_msg im;
658 size_t namelen_in; 660 size_t namelen_in;
659 int ret = -ENOKEY; 661 int ret = -ENOKEY;
@@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
665 if (idmap->idmap_upcall_data == NULL) 667 if (idmap->idmap_upcall_data == NULL)
666 goto out_noupcall; 668 goto out_noupcall;
667 669
668 cons = idmap->idmap_upcall_data->key_cons; 670 authkey = idmap->idmap_upcall_data->authkey;
671 rka = get_request_key_auth(authkey);
669 672
670 if (mlen != sizeof(im)) { 673 if (mlen != sizeof(im)) {
671 ret = -ENOSPC; 674 ret = -ENOSPC;
@@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
690 693
691 ret = nfs_idmap_read_and_verify_message(&im, 694 ret = nfs_idmap_read_and_verify_message(&im,
692 &idmap->idmap_upcall_data->idmap_msg, 695 &idmap->idmap_upcall_data->idmap_msg,
693 cons->key, cons->authkey); 696 rka->target_key, authkey);
694 if (ret >= 0) { 697 if (ret >= 0) {
695 key_set_timeout(cons->key, nfs_idmap_cache_timeout); 698 key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
696 ret = mlen; 699 ret = mlen;
697 } 700 }
698 701
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 22ce3c8a2f46..0570391eaa16 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1895,6 +1895,11 @@ static int nfs_parse_devname(const char *dev_name,
1895 size_t len; 1895 size_t len;
1896 char *end; 1896 char *end;
1897 1897
1898 if (unlikely(!dev_name || !*dev_name)) {
1899 dfprintk(MOUNT, "NFS: device name not specified\n");
1900 return -EINVAL;
1901 }
1902
1898 /* Is the host name protected with square brakcets? */ 1903 /* Is the host name protected with square brakcets? */
1899 if (*dev_name == '[') { 1904 if (*dev_name == '[') {
1900 end = strchr(++dev_name, ']'); 1905 end = strchr(++dev_name, ']');
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5a0bbf917a32..d09c9f878141 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -238,9 +238,9 @@ out:
238} 238}
239 239
240/* A writeback failed: mark the page as bad, and invalidate the page cache */ 240/* A writeback failed: mark the page as bad, and invalidate the page cache */
241static void nfs_set_pageerror(struct page *page) 241static void nfs_set_pageerror(struct address_space *mapping)
242{ 242{
243 nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); 243 nfs_zap_mapping(mapping->host, mapping);
244} 244}
245 245
246/* 246/*
@@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
621 nfs_set_page_writeback(page); 621 nfs_set_page_writeback(page);
622 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 622 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
623 623
624 ret = 0; 624 ret = req->wb_context->error;
625 /* If there is a fatal error that covers this write, just exit */ 625 /* If there is a fatal error that covers this write, just exit */
626 if (nfs_error_is_fatal_on_server(req->wb_context->error)) 626 if (nfs_error_is_fatal_on_server(ret))
627 goto out_launder; 627 goto out_launder;
628 628
629 ret = 0;
629 if (!nfs_pageio_add_request(pgio, req)) { 630 if (!nfs_pageio_add_request(pgio, req)) {
630 ret = pgio->pg_error; 631 ret = pgio->pg_error;
631 /* 632 /*
@@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
635 nfs_context_set_write_error(req->wb_context, ret); 636 nfs_context_set_write_error(req->wb_context, ret);
636 if (nfs_error_is_fatal_on_server(ret)) 637 if (nfs_error_is_fatal_on_server(ret))
637 goto out_launder; 638 goto out_launder;
638 } 639 } else
640 ret = -EAGAIN;
639 nfs_redirty_request(req); 641 nfs_redirty_request(req);
640 ret = -EAGAIN;
641 } else 642 } else
642 nfs_add_stats(page_file_mapping(page)->host, 643 nfs_add_stats(page_file_mapping(page)->host,
643 NFSIOS_WRITEPAGES, 1); 644 NFSIOS_WRITEPAGES, 1);
@@ -993,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
993 nfs_list_remove_request(req); 994 nfs_list_remove_request(req);
994 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && 995 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
995 (hdr->good_bytes < bytes)) { 996 (hdr->good_bytes < bytes)) {
996 nfs_set_pageerror(req->wb_page); 997 nfs_set_pageerror(page_file_mapping(req->wb_page));
997 nfs_context_set_write_error(req->wb_context, hdr->error); 998 nfs_context_set_write_error(req->wb_context, hdr->error);
998 goto remove_req; 999 goto remove_req;
999 } 1000 }
@@ -1347,7 +1348,8 @@ int nfs_updatepage(struct file *file, struct page *page,
1347 unsigned int offset, unsigned int count) 1348 unsigned int offset, unsigned int count)
1348{ 1349{
1349 struct nfs_open_context *ctx = nfs_file_open_context(file); 1350 struct nfs_open_context *ctx = nfs_file_open_context(file);
1350 struct inode *inode = page_file_mapping(page)->host; 1351 struct address_space *mapping = page_file_mapping(page);
1352 struct inode *inode = mapping->host;
1351 int status = 0; 1353 int status = 0;
1352 1354
1353 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); 1355 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -1365,7 +1367,7 @@ int nfs_updatepage(struct file *file, struct page *page,
1365 1367
1366 status = nfs_writepage_setup(ctx, page, offset, count); 1368 status = nfs_writepage_setup(ctx, page, offset, count);
1367 if (status < 0) 1369 if (status < 0)
1368 nfs_set_pageerror(page); 1370 nfs_set_pageerror(mapping);
1369 else 1371 else
1370 __set_page_dirty_nobuffers(page); 1372 __set_page_dirty_nobuffers(page);
1371out: 1373out:
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index b33f9785b756..72a7681f4046 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
1239 retval = nfsd_idmap_init(net); 1239 retval = nfsd_idmap_init(net);
1240 if (retval) 1240 if (retval)
1241 goto out_idmap_error; 1241 goto out_idmap_error;
1242 nn->nfsd4_lease = 45; /* default lease time */ 1242 nn->nfsd4_lease = 90; /* default lease time */
1243 nn->nfsd4_grace = 45; 1243 nn->nfsd4_grace = 90;
1244 nn->somebody_reclaimed = false; 1244 nn->somebody_reclaimed = false;
1245 nn->clverifier_counter = prandom_u32(); 1245 nn->clverifier_counter = prandom_u32();
1246 nn->clientid_counter = prandom_u32(); 1246 nn->clientid_counter = prandom_u32();
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 9824e32b2f23..7dc98e14655d 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
557 loff_t cloned; 557 loff_t cloned;
558 558
559 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); 559 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
560 if (cloned < 0)
561 return nfserrno(cloned);
560 if (count && cloned != count) 562 if (count && cloned != count)
561 cloned = -EINVAL; 563 return nfserrno(-EINVAL);
562 return nfserrno(cloned < 0 ? cloned : 0); 564 return 0;
563} 565}
564 566
565ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, 567ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 105576daca4a..798f1253141a 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
724 return -EBADF; 724 return -EBADF;
725 725
726 /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */ 726 /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
727 if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) 727 if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
728 return -EINVAL; 728 ret = -EINVAL;
729 goto fput_and_out;
730 }
729 731
730 /* verify that this is indeed an inotify instance */ 732 /* verify that this is indeed an inotify instance */
731 if (unlikely(f.file->f_op != &inotify_fops)) { 733 if (unlikely(f.file->f_op != &inotify_fops)) {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 633a63462573..f5ed9512d193 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
1086 1086
1087 task_lock(p); 1087 task_lock(p);
1088 if (!p->vfork_done && process_shares_mm(p, mm)) { 1088 if (!p->vfork_done && process_shares_mm(p, mm)) {
1089 pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
1090 task_pid_nr(p), p->comm,
1091 p->signal->oom_score_adj, oom_adj,
1092 task_pid_nr(task), task->comm);
1093 p->signal->oom_score_adj = oom_adj; 1089 p->signal->oom_score_adj = oom_adj;
1094 if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) 1090 if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
1095 p->signal->oom_score_adj_min = (short)oom_adj; 1091 p->signal->oom_score_adj_min = (short)oom_adj;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 8ae109429a88..e39bac94dead 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
256 inode = proc_get_inode(dir->i_sb, de); 256 inode = proc_get_inode(dir->i_sb, de);
257 if (!inode) 257 if (!inode)
258 return ERR_PTR(-ENOMEM); 258 return ERR_PTR(-ENOMEM);
259 d_set_d_op(dentry, &proc_misc_dentry_ops); 259 d_set_d_op(dentry, de->proc_dops);
260 return d_splice_alias(inode, dentry); 260 return d_splice_alias(inode, dentry);
261 } 261 }
262 read_unlock(&proc_subdir_lock); 262 read_unlock(&proc_subdir_lock);
@@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
429 INIT_LIST_HEAD(&ent->pde_openers); 429 INIT_LIST_HEAD(&ent->pde_openers);
430 proc_set_user(ent, (*parent)->uid, (*parent)->gid); 430 proc_set_user(ent, (*parent)->uid, (*parent)->gid);
431 431
432 ent->proc_dops = &proc_misc_dentry_ops;
433
432out: 434out:
433 return ent; 435 return ent;
434} 436}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5185d7f6a51e..95b14196f284 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -44,6 +44,7 @@ struct proc_dir_entry {
44 struct completion *pde_unload_completion; 44 struct completion *pde_unload_completion;
45 const struct inode_operations *proc_iops; 45 const struct inode_operations *proc_iops;
46 const struct file_operations *proc_fops; 46 const struct file_operations *proc_fops;
47 const struct dentry_operations *proc_dops;
47 union { 48 union {
48 const struct seq_operations *seq_ops; 49 const struct seq_operations *seq_ops;
49 int (*single_show)(struct seq_file *, void *); 50 int (*single_show)(struct seq_file *, void *);
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index d5e0fcb3439e..a7b12435519e 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode)
38 return maybe_get_net(PDE_NET(PDE(inode))); 38 return maybe_get_net(PDE_NET(PDE(inode)));
39} 39}
40 40
41static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
42{
43 return 0;
44}
45
46static const struct dentry_operations proc_net_dentry_ops = {
47 .d_revalidate = proc_net_d_revalidate,
48 .d_delete = always_delete_dentry,
49};
50
51static void pde_force_lookup(struct proc_dir_entry *pde)
52{
53 /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
54 pde->proc_dops = &proc_net_dentry_ops;
55}
56
41static int seq_open_net(struct inode *inode, struct file *file) 57static int seq_open_net(struct inode *inode, struct file *file)
42{ 58{
43 unsigned int state_size = PDE(inode)->state_size; 59 unsigned int state_size = PDE(inode)->state_size;
@@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
90 p = proc_create_reg(name, mode, &parent, data); 106 p = proc_create_reg(name, mode, &parent, data);
91 if (!p) 107 if (!p)
92 return NULL; 108 return NULL;
109 pde_force_lookup(p);
93 p->proc_fops = &proc_net_seq_fops; 110 p->proc_fops = &proc_net_seq_fops;
94 p->seq_ops = ops; 111 p->seq_ops = ops;
95 p->state_size = state_size; 112 p->state_size = state_size;
@@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
133 p = proc_create_reg(name, mode, &parent, data); 150 p = proc_create_reg(name, mode, &parent, data);
134 if (!p) 151 if (!p)
135 return NULL; 152 return NULL;
153 pde_force_lookup(p);
136 p->proc_fops = &proc_net_seq_fops; 154 p->proc_fops = &proc_net_seq_fops;
137 p->seq_ops = ops; 155 p->seq_ops = ops;
138 p->state_size = state_size; 156 p->state_size = state_size;
@@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
181 p = proc_create_reg(name, mode, &parent, data); 199 p = proc_create_reg(name, mode, &parent, data);
182 if (!p) 200 if (!p)
183 return NULL; 201 return NULL;
202 pde_force_lookup(p);
184 p->proc_fops = &proc_net_single_fops; 203 p->proc_fops = &proc_net_single_fops;
185 p->single_show = show; 204 p->single_show = show;
186 return proc_register(parent, p); 205 return proc_register(parent, p);
@@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
223 p = proc_create_reg(name, mode, &parent, data); 242 p = proc_create_reg(name, mode, &parent, data);
224 if (!p) 243 if (!p)
225 return NULL; 244 return NULL;
245 pde_force_lookup(p);
226 p->proc_fops = &proc_net_single_fops; 246 p->proc_fops = &proc_net_single_fops;
227 p->single_show = show; 247 p->single_show = show;
228 p->write = write; 248 p->write = write;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f0ec9edab2f3..85b0ef890b28 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -423,7 +423,7 @@ struct mem_size_stats {
423}; 423};
424 424
425static void smaps_account(struct mem_size_stats *mss, struct page *page, 425static void smaps_account(struct mem_size_stats *mss, struct page *page,
426 bool compound, bool young, bool dirty) 426 bool compound, bool young, bool dirty, bool locked)
427{ 427{
428 int i, nr = compound ? 1 << compound_order(page) : 1; 428 int i, nr = compound ? 1 << compound_order(page) : 1;
429 unsigned long size = nr * PAGE_SIZE; 429 unsigned long size = nr * PAGE_SIZE;
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
450 else 450 else
451 mss->private_clean += size; 451 mss->private_clean += size;
452 mss->pss += (u64)size << PSS_SHIFT; 452 mss->pss += (u64)size << PSS_SHIFT;
453 if (locked)
454 mss->pss_locked += (u64)size << PSS_SHIFT;
453 return; 455 return;
454 } 456 }
455 457
456 for (i = 0; i < nr; i++, page++) { 458 for (i = 0; i < nr; i++, page++) {
457 int mapcount = page_mapcount(page); 459 int mapcount = page_mapcount(page);
460 unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
458 461
459 if (mapcount >= 2) { 462 if (mapcount >= 2) {
460 if (dirty || PageDirty(page)) 463 if (dirty || PageDirty(page))
461 mss->shared_dirty += PAGE_SIZE; 464 mss->shared_dirty += PAGE_SIZE;
462 else 465 else
463 mss->shared_clean += PAGE_SIZE; 466 mss->shared_clean += PAGE_SIZE;
464 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; 467 mss->pss += pss / mapcount;
468 if (locked)
469 mss->pss_locked += pss / mapcount;
465 } else { 470 } else {
466 if (dirty || PageDirty(page)) 471 if (dirty || PageDirty(page))
467 mss->private_dirty += PAGE_SIZE; 472 mss->private_dirty += PAGE_SIZE;
468 else 473 else
469 mss->private_clean += PAGE_SIZE; 474 mss->private_clean += PAGE_SIZE;
470 mss->pss += PAGE_SIZE << PSS_SHIFT; 475 mss->pss += pss;
476 if (locked)
477 mss->pss_locked += pss;
471 } 478 }
472 } 479 }
473} 480}
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
490{ 497{
491 struct mem_size_stats *mss = walk->private; 498 struct mem_size_stats *mss = walk->private;
492 struct vm_area_struct *vma = walk->vma; 499 struct vm_area_struct *vma = walk->vma;
500 bool locked = !!(vma->vm_flags & VM_LOCKED);
493 struct page *page = NULL; 501 struct page *page = NULL;
494 502
495 if (pte_present(*pte)) { 503 if (pte_present(*pte)) {
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
532 if (!page) 540 if (!page)
533 return; 541 return;
534 542
535 smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte)); 543 smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
536} 544}
537 545
538#ifdef CONFIG_TRANSPARENT_HUGEPAGE 546#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
541{ 549{
542 struct mem_size_stats *mss = walk->private; 550 struct mem_size_stats *mss = walk->private;
543 struct vm_area_struct *vma = walk->vma; 551 struct vm_area_struct *vma = walk->vma;
552 bool locked = !!(vma->vm_flags & VM_LOCKED);
544 struct page *page; 553 struct page *page;
545 554
546 /* FOLL_DUMP will return -EFAULT on huge zero page */ 555 /* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
555 /* pass */; 564 /* pass */;
556 else 565 else
557 VM_BUG_ON_PAGE(1, page); 566 VM_BUG_ON_PAGE(1, page);
558 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); 567 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
559} 568}
560#else 569#else
561static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, 570static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
737 } 746 }
738 } 747 }
739#endif 748#endif
740
741 /* mmap_sem is held in m_start */ 749 /* mmap_sem is held in m_start */
742 walk_page_vma(vma, &smaps_walk); 750 walk_page_vma(vma, &smaps_walk);
743 if (vma->vm_flags & VM_LOCKED)
744 mss->pss_locked += mss->pss;
745} 751}
746 752
747#define SEQ_PUT_DEC(str, val) \ 753#define SEQ_PUT_DEC(str, val) \
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 96f7d32cd184..898c8321b343 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -128,7 +128,6 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
128 struct pstore_record *record) 128 struct pstore_record *record)
129{ 129{
130 struct persistent_ram_zone *prz; 130 struct persistent_ram_zone *prz;
131 bool update = (record->type == PSTORE_TYPE_DMESG);
132 131
133 /* Give up if we never existed or have hit the end. */ 132 /* Give up if we never existed or have hit the end. */
134 if (!przs) 133 if (!przs)
@@ -139,7 +138,7 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
139 return NULL; 138 return NULL;
140 139
141 /* Update old/shadowed buffer. */ 140 /* Update old/shadowed buffer. */
142 if (update) 141 if (prz->type == PSTORE_TYPE_DMESG)
143 persistent_ram_save_old(prz); 142 persistent_ram_save_old(prz);
144 143
145 if (!persistent_ram_old_size(prz)) 144 if (!persistent_ram_old_size(prz))
@@ -711,18 +710,15 @@ static int ramoops_probe(struct platform_device *pdev)
711{ 710{
712 struct device *dev = &pdev->dev; 711 struct device *dev = &pdev->dev;
713 struct ramoops_platform_data *pdata = dev->platform_data; 712 struct ramoops_platform_data *pdata = dev->platform_data;
713 struct ramoops_platform_data pdata_local;
714 struct ramoops_context *cxt = &oops_cxt; 714 struct ramoops_context *cxt = &oops_cxt;
715 size_t dump_mem_sz; 715 size_t dump_mem_sz;
716 phys_addr_t paddr; 716 phys_addr_t paddr;
717 int err = -EINVAL; 717 int err = -EINVAL;
718 718
719 if (dev_of_node(dev) && !pdata) { 719 if (dev_of_node(dev) && !pdata) {
720 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 720 pdata = &pdata_local;
721 if (!pdata) { 721 memset(pdata, 0, sizeof(*pdata));
722 pr_err("cannot allocate platform data buffer\n");
723 err = -ENOMEM;
724 goto fail_out;
725 }
726 722
727 err = ramoops_parse_dt(pdev, pdata); 723 err = ramoops_parse_dt(pdev, pdata);
728 if (err < 0) 724 if (err < 0)
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index feeae8081c22..aa85f2874a9f 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -43,7 +43,8 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
43 kuid_t uid; 43 kuid_t uid;
44 kgid_t gid; 44 kgid_t gid;
45 45
46 BUG_ON(!kobj); 46 if (WARN_ON(!kobj))
47 return -EINVAL;
47 48
48 if (kobj->parent) 49 if (kobj->parent)
49 parent = kobj->parent->sd; 50 parent = kobj->parent->sd;
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index bb71db63c99c..51398457fe00 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -325,7 +325,8 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
325 kuid_t uid; 325 kuid_t uid;
326 kgid_t gid; 326 kgid_t gid;
327 327
328 BUG_ON(!kobj || !kobj->sd || !attr); 328 if (WARN_ON(!kobj || !kobj->sd || !attr))
329 return -EINVAL;
329 330
330 kobject_get_ownership(kobj, &uid, &gid); 331 kobject_get_ownership(kobj, &uid, &gid);
331 return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, 332 return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode,
@@ -537,7 +538,8 @@ int sysfs_create_bin_file(struct kobject *kobj,
537 kuid_t uid; 538 kuid_t uid;
538 kgid_t gid; 539 kgid_t gid;
539 540
540 BUG_ON(!kobj || !kobj->sd || !attr); 541 if (WARN_ON(!kobj || !kobj->sd || !attr))
542 return -EINVAL;
541 543
542 kobject_get_ownership(kobj, &uid, &gid); 544 kobject_get_ownership(kobj, &uid, &gid);
543 return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true, 545 return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true,
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 1eb2d6307663..57038604d4a8 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -112,7 +112,8 @@ static int internal_create_group(struct kobject *kobj, int update,
112 kgid_t gid; 112 kgid_t gid;
113 int error; 113 int error;
114 114
115 BUG_ON(!kobj || (!update && !kobj->sd)); 115 if (WARN_ON(!kobj || (!update && !kobj->sd)))
116 return -EINVAL;
116 117
117 /* Updates may happen before the object has been instantiated */ 118 /* Updates may happen before the object has been instantiated */
118 if (unlikely(update && !kobj->sd)) 119 if (unlikely(update && !kobj->sd))
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 215c225b2ca1..c4deecc80f67 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -23,7 +23,8 @@ static int sysfs_do_create_link_sd(struct kernfs_node *parent,
23{ 23{
24 struct kernfs_node *kn, *target = NULL; 24 struct kernfs_node *kn, *target = NULL;
25 25
26 BUG_ON(!name || !parent); 26 if (WARN_ON(!name || !parent))
27 return -EINVAL;
27 28
28 /* 29 /*
29 * We don't own @target_kobj and it may be removed at any time. 30 * We don't own @target_kobj and it may be removed at any time.
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 1c8eecfe52b8..6acf1bfa0bfe 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -768,18 +768,23 @@ xrep_findroot_block(
768 if (!uuid_equal(&btblock->bb_u.s.bb_uuid, 768 if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
769 &mp->m_sb.sb_meta_uuid)) 769 &mp->m_sb.sb_meta_uuid))
770 goto out; 770 goto out;
771 /*
772 * Read verifiers can reference b_ops, so we set the pointer
773 * here. If the verifier fails we'll reset the buffer state
774 * to what it was before we touched the buffer.
775 */
776 bp->b_ops = fab->buf_ops;
771 fab->buf_ops->verify_read(bp); 777 fab->buf_ops->verify_read(bp);
772 if (bp->b_error) { 778 if (bp->b_error) {
779 bp->b_ops = NULL;
773 bp->b_error = 0; 780 bp->b_error = 0;
774 goto out; 781 goto out;
775 } 782 }
776 783
777 /* 784 /*
778 * Some read verifiers will (re)set b_ops, so we must be 785 * Some read verifiers will (re)set b_ops, so we must be
779 * careful not to blow away any such assignment. 786 * careful not to change b_ops after running the verifier.
780 */ 787 */
781 if (!bp->b_ops)
782 bp->b_ops = fab->buf_ops;
783 } 788 }
784 789
785 /* 790 /*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 338b9d9984e0..d9048bcea49c 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -449,6 +449,7 @@ xfs_map_blocks(
449 } 449 }
450 450
451 wpc->imap = imap; 451 wpc->imap = imap;
452 xfs_trim_extent_eof(&wpc->imap, ip);
452 trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap); 453 trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
453 return 0; 454 return 0;
454allocate_blocks: 455allocate_blocks:
@@ -459,6 +460,7 @@ allocate_blocks:
459 ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF || 460 ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
460 imap.br_startoff + imap.br_blockcount <= cow_fsb); 461 imap.br_startoff + imap.br_blockcount <= cow_fsb);
461 wpc->imap = imap; 462 wpc->imap = imap;
463 xfs_trim_extent_eof(&wpc->imap, ip);
462 trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap); 464 trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
463 return 0; 465 return 0;
464} 466}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index eedc5e0156ff..4f5f2ff3f70f 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -776,10 +776,26 @@ _xfs_buf_read(
776} 776}
777 777
778/* 778/*
779 * Set buffer ops on an unchecked buffer and validate it, if possible.
780 *
779 * If the caller passed in an ops structure and the buffer doesn't have ops 781 * If the caller passed in an ops structure and the buffer doesn't have ops
780 * assigned, set the ops and use them to verify the contents. If the contents 782 * assigned, set the ops and use them to verify the contents. If the contents
781 * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no 783 * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no
782 * recorded errors and is already in XBF_DONE state. 784 * recorded errors and is already in XBF_DONE state.
785 *
786 * Under normal operations, every in-core buffer must have buffer ops assigned
787 * to them when the buffer is read in from disk so that we can validate the
788 * metadata.
789 *
790 * However, there are two scenarios where one can encounter in-core buffers
791 * that don't have buffer ops. The first is during log recovery of buffers on
792 * a V4 filesystem, though these buffers are purged at the end of recovery.
793 *
794 * The other is online repair, which tries to match arbitrary metadata blocks
795 * with btree types in order to find the root. If online repair doesn't match
796 * the buffer with /any/ btree type, the buffer remains in memory in DONE state
797 * with no ops, and a subsequent read_buf call from elsewhere will not set the
798 * ops. This function helps us fix this situation.
783 */ 799 */
784int 800int
785xfs_buf_ensure_ops( 801xfs_buf_ensure_ops(
@@ -1536,8 +1552,7 @@ __xfs_buf_submit(
1536 xfs_buf_ioerror(bp, -EIO); 1552 xfs_buf_ioerror(bp, -EIO);
1537 bp->b_flags &= ~XBF_DONE; 1553 bp->b_flags &= ~XBF_DONE;
1538 xfs_buf_stale(bp); 1554 xfs_buf_stale(bp);
1539 if (bp->b_flags & XBF_ASYNC) 1555 xfs_buf_ioend(bp);
1540 xfs_buf_ioend(bp);
1541 return -EIO; 1556 return -EIO;
1542 } 1557 }
1543 1558
diff --git a/include/uapi/asm-generic/shmparam.h b/include/asm-generic/shmparam.h
index 8b78c0ba08b1..8b78c0ba08b1 100644
--- a/include/uapi/asm-generic/shmparam.h
+++ b/include/asm-generic/shmparam.h
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 5736c942c85b..2d4fc2d33810 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1365,6 +1365,13 @@ enum drm_dp_quirk {
1365 * to 16 bits. So will give a constant value (0x8000) for compatability. 1365 * to 16 bits. So will give a constant value (0x8000) for compatability.
1366 */ 1366 */
1367 DP_DPCD_QUIRK_CONSTANT_N, 1367 DP_DPCD_QUIRK_CONSTANT_N,
1368 /**
1369 * @DP_DPCD_QUIRK_NO_PSR:
1370 *
1371 * The device does not support PSR even if reports that it supports or
1372 * driver still need to implement proper handling for such device.
1373 */
1374 DP_DPCD_QUIRK_NO_PSR,
1368}; 1375};
1369 1376
1370/** 1377/**
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 59f005b419cf..727af08e5ea6 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -616,7 +616,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
616 struct drm_dp_mst_topology_mgr *mgr); 616 struct drm_dp_mst_topology_mgr *mgr);
617 617
618void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); 618void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
619int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); 619int __must_check
620drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
620struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, 621struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
621 struct drm_dp_mst_topology_mgr *mgr); 622 struct drm_dp_mst_topology_mgr *mgr);
622int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, 623int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h
new file mode 100644
index 000000000000..030981cd2d56
--- /dev/null
+++ b/include/dt-bindings/clock/actions,s500-cmu.h
@@ -0,0 +1,78 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Device Tree binding constants for Actions Semi S500 Clock Management Unit
4 *
5 * Copyright (c) 2014 Actions Semi Inc.
6 * Copyright (c) 2018 LSI-TEC - Caninos Loucos
7 */
8
9#ifndef __DT_BINDINGS_CLOCK_S500_CMU_H
10#define __DT_BINDINGS_CLOCK_S500_CMU_H
11
12#define CLK_NONE 0
13
14/* fixed rate clocks */
15#define CLK_LOSC 1
16#define CLK_HOSC 2
17
18/* pll clocks */
19#define CLK_CORE_PLL 3
20#define CLK_DEV_PLL 4
21#define CLK_DDR_PLL 5
22#define CLK_NAND_PLL 6
23#define CLK_DISPLAY_PLL 7
24#define CLK_ETHERNET_PLL 8
25#define CLK_AUDIO_PLL 9
26
27/* system clock */
28#define CLK_DEV 10
29#define CLK_H 11
30#define CLK_AHBPREDIV 12
31#define CLK_AHB 13
32#define CLK_DE 14
33#define CLK_BISP 15
34#define CLK_VCE 16
35#define CLK_VDE 17
36
37/* peripheral device clock */
38#define CLK_TIMER 18
39#define CLK_I2C0 19
40#define CLK_I2C1 20
41#define CLK_I2C2 21
42#define CLK_I2C3 22
43#define CLK_PWM0 23
44#define CLK_PWM1 24
45#define CLK_PWM2 25
46#define CLK_PWM3 26
47#define CLK_PWM4 27
48#define CLK_PWM5 28
49#define CLK_SD0 29
50#define CLK_SD1 30
51#define CLK_SD2 31
52#define CLK_SENSOR0 32
53#define CLK_SENSOR1 33
54#define CLK_SPI0 34
55#define CLK_SPI1 35
56#define CLK_SPI2 36
57#define CLK_SPI3 37
58#define CLK_UART0 38
59#define CLK_UART1 39
60#define CLK_UART2 40
61#define CLK_UART3 41
62#define CLK_UART4 42
63#define CLK_UART5 43
64#define CLK_UART6 44
65#define CLK_DE1 45
66#define CLK_DE2 46
67#define CLK_I2SRX 47
68#define CLK_I2STX 48
69#define CLK_HDMI_AUDIO 49
70#define CLK_HDMI 50
71#define CLK_SPDIF 51
72#define CLK_NAND 52
73#define CLK_ECC 53
74#define CLK_RMII_REF 54
75
76#define CLK_NR_CLKS (CLK_RMII_REF + 1)
77
78#endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */
diff --git a/include/dt-bindings/clock/axg-aoclkc.h b/include/dt-bindings/clock/axg-aoclkc.h
index 61955016a55b..8ec4a269c7a6 100644
--- a/include/dt-bindings/clock/axg-aoclkc.h
+++ b/include/dt-bindings/clock/axg-aoclkc.h
@@ -21,6 +21,11 @@
21#define CLKID_AO_SAR_ADC_SEL 8 21#define CLKID_AO_SAR_ADC_SEL 8
22#define CLKID_AO_SAR_ADC_DIV 9 22#define CLKID_AO_SAR_ADC_DIV 9
23#define CLKID_AO_SAR_ADC_CLK 10 23#define CLKID_AO_SAR_ADC_CLK 10
24#define CLKID_AO_ALT_XTAL 11 24#define CLKID_AO_CTS_OSCIN 11
25#define CLKID_AO_32K_PRE 12
26#define CLKID_AO_32K_DIV 13
27#define CLKID_AO_32K_SEL 14
28#define CLKID_AO_32K 15
29#define CLKID_AO_CTS_RTC_OSCIN 16
25 30
26#endif 31#endif
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 98bd85ce1e45..25ffa53573a5 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -156,7 +156,7 @@
156#define CLK_ACLK_G2D_266 220 156#define CLK_ACLK_G2D_266 220
157#define CLK_ACLK_G2D_400 221 157#define CLK_ACLK_G2D_400 221
158#define CLK_ACLK_G3D_400 222 158#define CLK_ACLK_G3D_400 222
159#define CLK_ACLK_IMEM_SSX_266 223 159#define CLK_ACLK_IMEM_SSSX_266 223
160#define CLK_ACLK_BUS0_400 224 160#define CLK_ACLK_BUS0_400 224
161#define CLK_ACLK_BUS1_400 225 161#define CLK_ACLK_BUS1_400 225
162#define CLK_ACLK_IMEM_200 226 162#define CLK_ACLK_IMEM_200 226
@@ -1406,4 +1406,10 @@
1406 1406
1407#define CAM1_NR_CLK 113 1407#define CAM1_NR_CLK 113
1408 1408
1409/* CMU_IMEM */
1410#define CLK_ACLK_SLIMSSS 2
1411#define CLK_PCLK_SLIMSSS 35
1412
1413#define IMEM_NR_CLK 36
1414
1409#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */ 1415#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/g12a-aoclkc.h b/include/dt-bindings/clock/g12a-aoclkc.h
new file mode 100644
index 000000000000..8db01ffbeb06
--- /dev/null
+++ b/include/dt-bindings/clock/g12a-aoclkc.h
@@ -0,0 +1,34 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Copyright (c) 2016 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 *
6 * Copyright (c) 2018 Amlogic, inc.
7 * Author: Qiufang Dai <qiufang.dai@amlogic.com>
8 */
9
10#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_G12A_AOCLK
11#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_G12A_AOCLK
12
13#define CLKID_AO_AHB 0
14#define CLKID_AO_IR_IN 1
15#define CLKID_AO_I2C_M0 2
16#define CLKID_AO_I2C_S0 3
17#define CLKID_AO_UART 4
18#define CLKID_AO_PROD_I2C 5
19#define CLKID_AO_UART2 6
20#define CLKID_AO_IR_OUT 7
21#define CLKID_AO_SAR_ADC 8
22#define CLKID_AO_MAILBOX 9
23#define CLKID_AO_M3 10
24#define CLKID_AO_AHB_SRAM 11
25#define CLKID_AO_RTI 12
26#define CLKID_AO_M4_FCLK 13
27#define CLKID_AO_M4_HCLK 14
28#define CLKID_AO_CLK81 15
29#define CLKID_AO_SAR_ADC_CLK 18
30#define CLKID_AO_32K 23
31#define CLKID_AO_CEC 27
32#define CLKID_AO_CTS_RTC_OSCIN 28
33
34#endif
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
new file mode 100644
index 000000000000..83b657038d1e
--- /dev/null
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -0,0 +1,135 @@
1/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
2/*
3 * Meson-G12A clock tree IDs
4 *
5 * Copyright (c) 2018 Amlogic, Inc. All rights reserved.
6 */
7
8#ifndef __G12A_CLKC_H
9#define __G12A_CLKC_H
10
11#define CLKID_SYS_PLL 0
12#define CLKID_FIXED_PLL 1
13#define CLKID_FCLK_DIV2 2
14#define CLKID_FCLK_DIV3 3
15#define CLKID_FCLK_DIV4 4
16#define CLKID_FCLK_DIV5 5
17#define CLKID_FCLK_DIV7 6
18#define CLKID_GP0_PLL 7
19#define CLKID_CLK81 10
20#define CLKID_MPLL0 11
21#define CLKID_MPLL1 12
22#define CLKID_MPLL2 13
23#define CLKID_MPLL3 14
24#define CLKID_DDR 15
25#define CLKID_DOS 16
26#define CLKID_AUDIO_LOCKER 17
27#define CLKID_MIPI_DSI_HOST 18
28#define CLKID_ETH_PHY 19
29#define CLKID_ISA 20
30#define CLKID_PL301 21
31#define CLKID_PERIPHS 22
32#define CLKID_SPICC0 23
33#define CLKID_I2C 24
34#define CLKID_SANA 25
35#define CLKID_SD 26
36#define CLKID_RNG0 27
37#define CLKID_UART0 28
38#define CLKID_SPICC1 29
39#define CLKID_HIU_IFACE 30
40#define CLKID_MIPI_DSI_PHY 31
41#define CLKID_ASSIST_MISC 32
42#define CLKID_SD_EMMC_A 33
43#define CLKID_SD_EMMC_B 34
44#define CLKID_SD_EMMC_C 35
45#define CLKID_AUDIO_CODEC 36
46#define CLKID_AUDIO 37
47#define CLKID_ETH 38
48#define CLKID_DEMUX 39
49#define CLKID_AUDIO_IFIFO 40
50#define CLKID_ADC 41
51#define CLKID_UART1 42
52#define CLKID_G2D 43
53#define CLKID_RESET 44
54#define CLKID_PCIE_COMB 45
55#define CLKID_PARSER 46
56#define CLKID_USB 47
57#define CLKID_PCIE_PHY 48
58#define CLKID_AHB_ARB0 49
59#define CLKID_AHB_DATA_BUS 50
60#define CLKID_AHB_CTRL_BUS 51
61#define CLKID_HTX_HDCP22 52
62#define CLKID_HTX_PCLK 53
63#define CLKID_BT656 54
64#define CLKID_USB1_DDR_BRIDGE 55
65#define CLKID_MMC_PCLK 56
66#define CLKID_UART2 57
67#define CLKID_VPU_INTR 58
68#define CLKID_GIC 59
69#define CLKID_SD_EMMC_A_CLK0 60
70#define CLKID_SD_EMMC_B_CLK0 61
71#define CLKID_SD_EMMC_C_CLK0 62
72#define CLKID_HIFI_PLL 74
73#define CLKID_VCLK2_VENCI0 80
74#define CLKID_VCLK2_VENCI1 81
75#define CLKID_VCLK2_VENCP0 82
76#define CLKID_VCLK2_VENCP1 83
77#define CLKID_VCLK2_VENCT0 84
78#define CLKID_VCLK2_VENCT1 85
79#define CLKID_VCLK2_OTHER 86
80#define CLKID_VCLK2_ENCI 87
81#define CLKID_VCLK2_ENCP 88
82#define CLKID_DAC_CLK 89
83#define CLKID_AOCLK 90
84#define CLKID_IEC958 91
85#define CLKID_ENC480P 92
86#define CLKID_RNG1 93
87#define CLKID_VCLK2_ENCT 94
88#define CLKID_VCLK2_ENCL 95
89#define CLKID_VCLK2_VENCLMMC 96
90#define CLKID_VCLK2_VENCL 97
91#define CLKID_VCLK2_OTHER1 98
92#define CLKID_FCLK_DIV2P5 99
93#define CLKID_DMA 105
94#define CLKID_EFUSE 106
95#define CLKID_ROM_BOOT 107
96#define CLKID_RESET_SEC 108
97#define CLKID_SEC_AHB_APB3 109
98#define CLKID_VPU_0_SEL 110
99#define CLKID_VPU_0 112
100#define CLKID_VPU_1_SEL 113
101#define CLKID_VPU_1 115
102#define CLKID_VPU 116
103#define CLKID_VAPB_0_SEL 117
104#define CLKID_VAPB_0 119
105#define CLKID_VAPB_1_SEL 120
106#define CLKID_VAPB_1 122
107#define CLKID_VAPB_SEL 123
108#define CLKID_VAPB 124
109#define CLKID_HDMI_PLL 128
110#define CLKID_VID_PLL 129
111#define CLKID_VCLK 138
112#define CLKID_VCLK2 139
113#define CLKID_VCLK_DIV1 148
114#define CLKID_VCLK_DIV2 149
115#define CLKID_VCLK_DIV4 150
116#define CLKID_VCLK_DIV6 151
117#define CLKID_VCLK_DIV12 152
118#define CLKID_VCLK2_DIV1 153
119#define CLKID_VCLK2_DIV2 154
120#define CLKID_VCLK2_DIV4 155
121#define CLKID_VCLK2_DIV6 156
122#define CLKID_VCLK2_DIV12 157
123#define CLKID_CTS_ENCI 162
124#define CLKID_CTS_ENCP 163
125#define CLKID_CTS_VDAC 164
126#define CLKID_HDMI_TX 165
127#define CLKID_HDMI 168
128#define CLKID_MALI_0_SEL 169
129#define CLKID_MALI_0 171
130#define CLKID_MALI_1_SEL 172
131#define CLKID_MALI_1 174
132#define CLKID_MALI 175
133#define CLKID_MPLL_5OM 177
134
135#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/gxbb-aoclkc.h b/include/dt-bindings/clock/gxbb-aoclkc.h
index 9d15e2221fdb..ec3b26319fc4 100644
--- a/include/dt-bindings/clock/gxbb-aoclkc.h
+++ b/include/dt-bindings/clock/gxbb-aoclkc.h
@@ -63,5 +63,12 @@
63#define CLKID_AO_UART2 4 63#define CLKID_AO_UART2 4
64#define CLKID_AO_IR_BLASTER 5 64#define CLKID_AO_IR_BLASTER 5
65#define CLKID_AO_CEC_32K 6 65#define CLKID_AO_CEC_32K 6
66#define CLKID_AO_CTS_OSCIN 7
67#define CLKID_AO_32K_PRE 8
68#define CLKID_AO_32K_DIV 9
69#define CLKID_AO_32K_SEL 10
70#define CLKID_AO_32K 11
71#define CLKID_AO_CTS_RTC_OSCIN 12
72#define CLKID_AO_CLK81 13
66 73
67#endif 74#endif
diff --git a/include/dt-bindings/clock/imx5-clock.h b/include/dt-bindings/clock/imx5-clock.h
index d382fc71aa83..a81be5be6700 100644
--- a/include/dt-bindings/clock/imx5-clock.h
+++ b/include/dt-bindings/clock/imx5-clock.h
@@ -214,6 +214,7 @@
214#define IMX5_CLK_IEEE1588_SEL 202 214#define IMX5_CLK_IEEE1588_SEL 202
215#define IMX5_CLK_IEEE1588_PODF 203 215#define IMX5_CLK_IEEE1588_PODF 203
216#define IMX5_CLK_IEEE1588_GATE 204 216#define IMX5_CLK_IEEE1588_GATE 204
217#define IMX5_CLK_END 205 217#define IMX5_CLK_SCC2_IPG_GATE 205
218#define IMX5_CLK_END 206
218 219
219#endif /* __DT_BINDINGS_CLOCK_IMX5_H */ 220#endif /* __DT_BINDINGS_CLOCK_IMX5_H */
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
new file mode 100644
index 000000000000..1b4353e7b486
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -0,0 +1,244 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright 2017-2018 NXP
4 */
5
6#ifndef __DT_BINDINGS_CLOCK_IMX8MM_H
7#define __DT_BINDINGS_CLOCK_IMX8MM_H
8
9#define IMX8MM_CLK_DUMMY 0
10#define IMX8MM_CLK_32K 1
11#define IMX8MM_CLK_24M 2
12#define IMX8MM_OSC_HDMI_CLK 3
13#define IMX8MM_CLK_EXT1 4
14#define IMX8MM_CLK_EXT2 5
15#define IMX8MM_CLK_EXT3 6
16#define IMX8MM_CLK_EXT4 7
17#define IMX8MM_AUDIO_PLL1_REF_SEL 8
18#define IMX8MM_AUDIO_PLL2_REF_SEL 9
19#define IMX8MM_VIDEO_PLL1_REF_SEL 10
20#define IMX8MM_DRAM_PLL_REF_SEL 11
21#define IMX8MM_GPU_PLL_REF_SEL 12
22#define IMX8MM_VPU_PLL_REF_SEL 13
23#define IMX8MM_ARM_PLL_REF_SEL 14
24#define IMX8MM_SYS_PLL1_REF_SEL 15
25#define IMX8MM_SYS_PLL2_REF_SEL 16
26#define IMX8MM_SYS_PLL3_REF_SEL 17
27#define IMX8MM_AUDIO_PLL1 18
28#define IMX8MM_AUDIO_PLL2 19
29#define IMX8MM_VIDEO_PLL1 20
30#define IMX8MM_DRAM_PLL 21
31#define IMX8MM_GPU_PLL 22
32#define IMX8MM_VPU_PLL 23
33#define IMX8MM_ARM_PLL 24
34#define IMX8MM_SYS_PLL1 25
35#define IMX8MM_SYS_PLL2 26
36#define IMX8MM_SYS_PLL3 27
37#define IMX8MM_AUDIO_PLL1_BYPASS 28
38#define IMX8MM_AUDIO_PLL2_BYPASS 29
39#define IMX8MM_VIDEO_PLL1_BYPASS 30
40#define IMX8MM_DRAM_PLL_BYPASS 31
41#define IMX8MM_GPU_PLL_BYPASS 32
42#define IMX8MM_VPU_PLL_BYPASS 33
43#define IMX8MM_ARM_PLL_BYPASS 34
44#define IMX8MM_SYS_PLL1_BYPASS 35
45#define IMX8MM_SYS_PLL2_BYPASS 36
46#define IMX8MM_SYS_PLL3_BYPASS 37
47#define IMX8MM_AUDIO_PLL1_OUT 38
48#define IMX8MM_AUDIO_PLL2_OUT 39
49#define IMX8MM_VIDEO_PLL1_OUT 40
50#define IMX8MM_DRAM_PLL_OUT 41
51#define IMX8MM_GPU_PLL_OUT 42
52#define IMX8MM_VPU_PLL_OUT 43
53#define IMX8MM_ARM_PLL_OUT 44
54#define IMX8MM_SYS_PLL1_OUT 45
55#define IMX8MM_SYS_PLL2_OUT 46
56#define IMX8MM_SYS_PLL3_OUT 47
57#define IMX8MM_SYS_PLL1_40M 48
58#define IMX8MM_SYS_PLL1_80M 49
59#define IMX8MM_SYS_PLL1_100M 50
60#define IMX8MM_SYS_PLL1_133M 51
61#define IMX8MM_SYS_PLL1_160M 52
62#define IMX8MM_SYS_PLL1_200M 53
63#define IMX8MM_SYS_PLL1_266M 54
64#define IMX8MM_SYS_PLL1_400M 55
65#define IMX8MM_SYS_PLL1_800M 56
66#define IMX8MM_SYS_PLL2_50M 57
67#define IMX8MM_SYS_PLL2_100M 58
68#define IMX8MM_SYS_PLL2_125M 59
69#define IMX8MM_SYS_PLL2_166M 60
70#define IMX8MM_SYS_PLL2_200M 61
71#define IMX8MM_SYS_PLL2_250M 62
72#define IMX8MM_SYS_PLL2_333M 63
73#define IMX8MM_SYS_PLL2_500M 64
74#define IMX8MM_SYS_PLL2_1000M 65
75
76/* core */
77#define IMX8MM_CLK_A53_SRC 66
78#define IMX8MM_CLK_M4_SRC 67
79#define IMX8MM_CLK_VPU_SRC 68
80#define IMX8MM_CLK_GPU3D_SRC 69
81#define IMX8MM_CLK_GPU2D_SRC 70
82#define IMX8MM_CLK_A53_CG 71
83#define IMX8MM_CLK_M4_CG 72
84#define IMX8MM_CLK_VPU_CG 73
85#define IMX8MM_CLK_GPU3D_CG 74
86#define IMX8MM_CLK_GPU2D_CG 75
87#define IMX8MM_CLK_A53_DIV 76
88#define IMX8MM_CLK_M4_DIV 77
89#define IMX8MM_CLK_VPU_DIV 78
90#define IMX8MM_CLK_GPU3D_DIV 79
91#define IMX8MM_CLK_GPU2D_DIV 80
92
93/* bus */
94#define IMX8MM_CLK_MAIN_AXI 81
95#define IMX8MM_CLK_ENET_AXI 82
96#define IMX8MM_CLK_NAND_USDHC_BUS 83
97#define IMX8MM_CLK_VPU_BUS 84
98#define IMX8MM_CLK_DISP_AXI 85
99#define IMX8MM_CLK_DISP_APB 86
100#define IMX8MM_CLK_DISP_RTRM 87
101#define IMX8MM_CLK_USB_BUS 88
102#define IMX8MM_CLK_GPU_AXI 89
103#define IMX8MM_CLK_GPU_AHB 90
104#define IMX8MM_CLK_NOC 91
105#define IMX8MM_CLK_NOC_APB 92
106
107#define IMX8MM_CLK_AHB 93
108#define IMX8MM_CLK_AUDIO_AHB 94
109#define IMX8MM_CLK_IPG_ROOT 95
110#define IMX8MM_CLK_IPG_AUDIO_ROOT 96
111
112#define IMX8MM_CLK_DRAM_ALT 97
113#define IMX8MM_CLK_DRAM_APB 98
114#define IMX8MM_CLK_VPU_G1 99
115#define IMX8MM_CLK_VPU_G2 100
116#define IMX8MM_CLK_DISP_DTRC 101
117#define IMX8MM_CLK_DISP_DC8000 102
118#define IMX8MM_CLK_PCIE1_CTRL 103
119#define IMX8MM_CLK_PCIE1_PHY 104
120#define IMX8MM_CLK_PCIE1_AUX 105
121#define IMX8MM_CLK_DC_PIXEL 106
122#define IMX8MM_CLK_LCDIF_PIXEL 107
123#define IMX8MM_CLK_SAI1 108
124#define IMX8MM_CLK_SAI2 109
125#define IMX8MM_CLK_SAI3 110
126#define IMX8MM_CLK_SAI4 111
127#define IMX8MM_CLK_SAI5 112
128#define IMX8MM_CLK_SAI6 113
129#define IMX8MM_CLK_SPDIF1 114
130#define IMX8MM_CLK_SPDIF2 115
131#define IMX8MM_CLK_ENET_REF 116
132#define IMX8MM_CLK_ENET_TIMER 117
133#define IMX8MM_CLK_ENET_PHY_REF 118
134#define IMX8MM_CLK_NAND 119
135#define IMX8MM_CLK_QSPI 120
136#define IMX8MM_CLK_USDHC1 121
137#define IMX8MM_CLK_USDHC2 122
138#define IMX8MM_CLK_I2C1 123
139#define IMX8MM_CLK_I2C2 124
140#define IMX8MM_CLK_I2C3 125
141#define IMX8MM_CLK_I2C4 126
142#define IMX8MM_CLK_UART1 127
143#define IMX8MM_CLK_UART2 128
144#define IMX8MM_CLK_UART3 129
145#define IMX8MM_CLK_UART4 130
146#define IMX8MM_CLK_USB_CORE_REF 131
147#define IMX8MM_CLK_USB_PHY_REF 132
148#define IMX8MM_CLK_ECSPI1 133
149#define IMX8MM_CLK_ECSPI2 134
150#define IMX8MM_CLK_PWM1 135
151#define IMX8MM_CLK_PWM2 136
152#define IMX8MM_CLK_PWM3 137
153#define IMX8MM_CLK_PWM4 138
154#define IMX8MM_CLK_GPT1 139
155#define IMX8MM_CLK_WDOG 140
156#define IMX8MM_CLK_WRCLK 141
157#define IMX8MM_CLK_DSI_CORE 142
158#define IMX8MM_CLK_DSI_PHY_REF 143
159#define IMX8MM_CLK_DSI_DBI 144
160#define IMX8MM_CLK_USDHC3 145
161#define IMX8MM_CLK_CSI1_CORE 146
162#define IMX8MM_CLK_CSI1_PHY_REF 147
163#define IMX8MM_CLK_CSI1_ESC 148
164#define IMX8MM_CLK_CSI2_CORE 149
165#define IMX8MM_CLK_CSI2_PHY_REF 150
166#define IMX8MM_CLK_CSI2_ESC 151
167#define IMX8MM_CLK_PCIE2_CTRL 152
168#define IMX8MM_CLK_PCIE2_PHY 153
169#define IMX8MM_CLK_PCIE2_AUX 154
170#define IMX8MM_CLK_ECSPI3 155
171#define IMX8MM_CLK_PDM 156
172#define IMX8MM_CLK_VPU_H1 157
173#define IMX8MM_CLK_CLKO1 158
174
175#define IMX8MM_CLK_ECSPI1_ROOT 159
176#define IMX8MM_CLK_ECSPI2_ROOT 160
177#define IMX8MM_CLK_ECSPI3_ROOT 161
178#define IMX8MM_CLK_ENET1_ROOT 162
179#define IMX8MM_CLK_GPT1_ROOT 163
180#define IMX8MM_CLK_I2C1_ROOT 164
181#define IMX8MM_CLK_I2C2_ROOT 165
182#define IMX8MM_CLK_I2C3_ROOT 166
183#define IMX8MM_CLK_I2C4_ROOT 167
184#define IMX8MM_CLK_OCOTP_ROOT 168
185#define IMX8MM_CLK_PCIE1_ROOT 169
186#define IMX8MM_CLK_PWM1_ROOT 170
187#define IMX8MM_CLK_PWM2_ROOT 171
188#define IMX8MM_CLK_PWM3_ROOT 172
189#define IMX8MM_CLK_PWM4_ROOT 173
190#define IMX8MM_CLK_QSPI_ROOT 174
191#define IMX8MM_CLK_NAND_ROOT 175
192#define IMX8MM_CLK_SAI1_ROOT 176
193#define IMX8MM_CLK_SAI1_IPG 177
194#define IMX8MM_CLK_SAI2_ROOT 178
195#define IMX8MM_CLK_SAI2_IPG 179
196#define IMX8MM_CLK_SAI3_ROOT 180
197#define IMX8MM_CLK_SAI3_IPG 181
198#define IMX8MM_CLK_SAI4_ROOT 182
199#define IMX8MM_CLK_SAI4_IPG 183
200#define IMX8MM_CLK_SAI5_ROOT 184
201#define IMX8MM_CLK_SAI5_IPG 185
202#define IMX8MM_CLK_SAI6_ROOT 186
203#define IMX8MM_CLK_SAI6_IPG 187
204#define IMX8MM_CLK_UART1_ROOT 188
205#define IMX8MM_CLK_UART2_ROOT 189
206#define IMX8MM_CLK_UART3_ROOT 190
207#define IMX8MM_CLK_UART4_ROOT 191
208#define IMX8MM_CLK_USB1_CTRL_ROOT 192
209#define IMX8MM_CLK_GPU3D_ROOT 193
210#define IMX8MM_CLK_USDHC1_ROOT 194
211#define IMX8MM_CLK_USDHC2_ROOT 195
212#define IMX8MM_CLK_WDOG1_ROOT 196
213#define IMX8MM_CLK_WDOG2_ROOT 197
214#define IMX8MM_CLK_WDOG3_ROOT 198
215#define IMX8MM_CLK_VPU_G1_ROOT 199
216#define IMX8MM_CLK_GPU_BUS_ROOT 200
217#define IMX8MM_CLK_VPU_H1_ROOT 201
218#define IMX8MM_CLK_VPU_G2_ROOT 202
219#define IMX8MM_CLK_PDM_ROOT 203
220#define IMX8MM_CLK_DISP_ROOT 204
221#define IMX8MM_CLK_DISP_AXI_ROOT 205
222#define IMX8MM_CLK_DISP_APB_ROOT 206
223#define IMX8MM_CLK_DISP_RTRM_ROOT 207
224#define IMX8MM_CLK_USDHC3_ROOT 208
225#define IMX8MM_CLK_TMU_ROOT 209
226#define IMX8MM_CLK_VPU_DEC_ROOT 210
227#define IMX8MM_CLK_SDMA1_ROOT 211
228#define IMX8MM_CLK_SDMA2_ROOT 212
229#define IMX8MM_CLK_SDMA3_ROOT 213
230#define IMX8MM_CLK_GPT_3M 214
231#define IMX8MM_CLK_ARM 215
232#define IMX8MM_CLK_PDM_IPG 216
233#define IMX8MM_CLK_GPU2D_ROOT 217
234#define IMX8MM_CLK_MU_ROOT 218
235#define IMX8MM_CLK_CSI1_ROOT 219
236
237#define IMX8MM_CLK_DRAM_CORE 220
238#define IMX8MM_CLK_DRAM_ALT_ROOT 221
239
240#define IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK 222
241
242#define IMX8MM_CLK_END 223
243
244#endif
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index b53be41929be..b58cc643c9c9 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -350,7 +350,7 @@
350#define IMX8MQ_CLK_VPU_G2_ROOT 241 350#define IMX8MQ_CLK_VPU_G2_ROOT 241
351 351
352/* SCCG PLL GATE */ 352/* SCCG PLL GATE */
353#define IMX8MQ_SYS1_PLL_OUT 232 353#define IMX8MQ_SYS1_PLL_OUT 242
354#define IMX8MQ_SYS2_PLL_OUT 243 354#define IMX8MQ_SYS2_PLL_OUT 243
355#define IMX8MQ_SYS3_PLL_OUT 244 355#define IMX8MQ_SYS3_PLL_OUT 244
356#define IMX8MQ_DRAM_PLL_OUT 245 356#define IMX8MQ_DRAM_PLL_OUT 245
@@ -372,24 +372,33 @@
372/* txesc clock */ 372/* txesc clock */
373#define IMX8MQ_CLK_DSI_IPG_DIV 256 373#define IMX8MQ_CLK_DSI_IPG_DIV 256
374 374
375#define IMX8MQ_CLK_TMU_ROOT 265 375#define IMX8MQ_CLK_TMU_ROOT 257
376 376
377/* Display root clocks */ 377/* Display root clocks */
378#define IMX8MQ_CLK_DISP_AXI_ROOT 266 378#define IMX8MQ_CLK_DISP_AXI_ROOT 258
379#define IMX8MQ_CLK_DISP_APB_ROOT 267 379#define IMX8MQ_CLK_DISP_APB_ROOT 259
380#define IMX8MQ_CLK_DISP_RTRM_ROOT 268 380#define IMX8MQ_CLK_DISP_RTRM_ROOT 260
381 381
382#define IMX8MQ_CLK_OCOTP_ROOT 269 382#define IMX8MQ_CLK_OCOTP_ROOT 261
383 383
384#define IMX8MQ_CLK_DRAM_ALT_ROOT 270 384#define IMX8MQ_CLK_DRAM_ALT_ROOT 262
385#define IMX8MQ_CLK_DRAM_CORE 271 385#define IMX8MQ_CLK_DRAM_CORE 263
386 386
387#define IMX8MQ_CLK_MU_ROOT 272 387#define IMX8MQ_CLK_MU_ROOT 264
388#define IMX8MQ_VIDEO2_PLL_OUT 273 388#define IMX8MQ_VIDEO2_PLL_OUT 265
389 389
390#define IMX8MQ_CLK_CLKO2 274 390#define IMX8MQ_CLK_CLKO2 266
391 391
392#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275 392#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 267
393 393
394#define IMX8MQ_CLK_END 276 394#define IMX8MQ_CLK_CLKO1 268
395#define IMX8MQ_CLK_ARM 269
396
397#define IMX8MQ_CLK_GPIO1_ROOT 270
398#define IMX8MQ_CLK_GPIO2_ROOT 271
399#define IMX8MQ_CLK_GPIO3_ROOT 272
400#define IMX8MQ_CLK_GPIO4_ROOT 273
401#define IMX8MQ_CLK_GPIO5_ROOT 274
402
403#define IMX8MQ_CLK_END 275
395#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ 404#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 7b24fc791146..e785c6eb3561 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -71,7 +71,7 @@
71#define MMP2_CLK_CCIC1_MIX 117 71#define MMP2_CLK_CCIC1_MIX 117
72#define MMP2_CLK_CCIC1_PHY 118 72#define MMP2_CLK_CCIC1_PHY 118
73#define MMP2_CLK_CCIC1_SPHY 119 73#define MMP2_CLK_CCIC1_SPHY 119
74#define MMP2_CLK_SP 120 74#define MMP2_CLK_DISP0_LCDC 120
75 75
76#define MMP2_NR_CLKS 200 76#define MMP2_NR_CLKS 200
77#endif 77#endif
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 5fe2923382d0..8067077a62ca 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -104,6 +104,7 @@
104#define CLKID_MPLL2 95 104#define CLKID_MPLL2 95
105#define CLKID_NAND_CLK 112 105#define CLKID_NAND_CLK 112
106#define CLKID_ABP 124 106#define CLKID_ABP 124
107#define CLKID_APB 124
107#define CLKID_PERIPH 126 108#define CLKID_PERIPH 126
108#define CLKID_AXI 128 109#define CLKID_AXI 128
109#define CLKID_L2_DRAM 130 110#define CLKID_L2_DRAM 130
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index 3658b0c14966..ede93a0ca156 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -127,5 +127,15 @@
127#define RPM_SMD_BIMC_GPU_A_CLK 77 127#define RPM_SMD_BIMC_GPU_A_CLK 77
128#define RPM_SMD_QPIC_CLK 78 128#define RPM_SMD_QPIC_CLK 78
129#define RPM_SMD_QPIC_CLK_A 79 129#define RPM_SMD_QPIC_CLK_A 79
130#define RPM_SMD_LN_BB_CLK1 80
131#define RPM_SMD_LN_BB_CLK1_A 81
132#define RPM_SMD_LN_BB_CLK2 82
133#define RPM_SMD_LN_BB_CLK2_A 83
134#define RPM_SMD_LN_BB_CLK3_PIN 84
135#define RPM_SMD_LN_BB_CLK3_A_PIN 85
136#define RPM_SMD_RF_CLK3 86
137#define RPM_SMD_RF_CLK3_A 87
138#define RPM_SMD_RF_CLK3_PIN 88
139#define RPM_SMD_RF_CLK3_A_PIN 89
130 140
131#endif 141#endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index f48fbd6f2095..edcab3f7b7d3 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -18,5 +18,6 @@
18#define RPMH_RF_CLK2_A 9 18#define RPMH_RF_CLK2_A 9
19#define RPMH_RF_CLK3 10 19#define RPMH_RF_CLK3 10
20#define RPMH_RF_CLK3_A 11 20#define RPMH_RF_CLK3_A 11
21#define RPMH_IPA_CLK 12
21 22
22#endif 23#endif
diff --git a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
index 9bc5d45ff4b5..e355363f40c2 100644
--- a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
@@ -54,5 +54,6 @@
54#define R8A774A1_CLK_CPEX 43 54#define R8A774A1_CLK_CPEX 43
55#define R8A774A1_CLK_R 44 55#define R8A774A1_CLK_R 44
56#define R8A774A1_CLK_OSC 45 56#define R8A774A1_CLK_OSC 45
57#define R8A774A1_CLK_CANFD 46
57 58
58#endif /* __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ */ 59#endif /* __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
index 8fe51b6aca28..8ad9cd6be8e9 100644
--- a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
@@ -56,5 +56,6 @@
56#define R8A774C0_CLK_CSI0 45 56#define R8A774C0_CLK_CSI0 45
57#define R8A774C0_CLK_CP 46 57#define R8A774C0_CLK_CP 46
58#define R8A774C0_CLK_CPEX 47 58#define R8A774C0_CLK_CPEX 47
59#define R8A774C0_CLK_CANFD 48
59 60
60#endif /* __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ */ 61#endif /* __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 90ec780bfc68..4cdaf135829c 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -248,7 +248,4 @@
248 248
249#define STM32MP1_LAST_CLK 232 249#define STM32MP1_LAST_CLK 232
250 250
251#define LTDC_K LTDC_PX
252#define ETHMAC_K ETHCK_K
253
254#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */ 251#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
index ad6f55dabd6d..0f2e0fe45ca4 100644
--- a/include/dt-bindings/reset/amlogic,meson-axg-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
@@ -1,12 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
1/* 2/*
2 *
3 * Copyright (c) 2016 BayLibre, SAS. 3 * Copyright (c) 2016 BayLibre, SAS.
4 * Author: Neil Armstrong <narmstrong@baylibre.com> 4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 * 5 *
6 * Copyright (c) 2017 Amlogic, inc. 6 * Copyright (c) 2017 Amlogic, inc.
7 * Author: Yixun Lan <yixun.lan@amlogic.com> 7 * Author: Yixun Lan <yixun.lan@amlogic.com>
8 * 8 *
9 * SPDX-License-Identifier: (GPL-2.0+ OR BSD)
10 */ 9 */
11 10
12#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H 11#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
diff --git a/include/dt-bindings/reset/g12a-aoclkc.h b/include/dt-bindings/reset/g12a-aoclkc.h
new file mode 100644
index 000000000000..bd2e2337135c
--- /dev/null
+++ b/include/dt-bindings/reset/g12a-aoclkc.h
@@ -0,0 +1,18 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Copyright (c) 2016 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 */
6
7#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK
8#define DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK
9
10#define RESET_AO_IR_IN 0
11#define RESET_AO_UART 1
12#define RESET_AO_I2C_M 2
13#define RESET_AO_I2C_S 3
14#define RESET_AO_SAR_ADC 4
15#define RESET_AO_UART2 5
16#define RESET_AO_IR_OUT 6
17
18#endif
diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h
new file mode 100644
index 000000000000..a726dd3f1dc6
--- /dev/null
+++ b/include/keys/request_key_auth-type.h
@@ -0,0 +1,36 @@
1/* request_key authorisation token key type
2 *
3 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H
13#define _KEYS_REQUEST_KEY_AUTH_TYPE_H
14
15#include <linux/key.h>
16
17/*
18 * Authorisation record for request_key().
19 */
20struct request_key_auth {
21 struct key *target_key;
22 struct key *dest_keyring;
23 const struct cred *cred;
24 void *callout_info;
25 size_t callout_len;
26 pid_t pid;
27 char op[8];
28} __randomize_layout;
29
30static inline struct request_key_auth *get_request_key_auth(const struct key *key)
31{
32 return key->payload.data[0];
33}
34
35
36#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index e098cbe27db5..12babe991594 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -31,7 +31,7 @@
31struct user_key_payload { 31struct user_key_payload {
32 struct rcu_head rcu; /* RCU destructor */ 32 struct rcu_head rcu; /* RCU destructor */
33 unsigned short datalen; /* length of this data */ 33 unsigned short datalen; /* length of this data */
34 char data[0]; /* actual data */ 34 char data[0] __aligned(__alignof__(u64)); /* actual data */
35}; 35};
36 36
37extern struct key_type key_type_user; 37extern struct key_type key_type_user;
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4f31f96bbfab..c36c86f1ec9a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -100,7 +100,7 @@ enum vgic_irq_config {
100}; 100};
101 101
102struct vgic_irq { 102struct vgic_irq {
103 spinlock_t irq_lock; /* Protects the content of the struct */ 103 raw_spinlock_t irq_lock; /* Protects the content of the struct */
104 struct list_head lpi_list; /* Used to link all LPIs together */ 104 struct list_head lpi_list; /* Used to link all LPIs together */
105 struct list_head ap_list; 105 struct list_head ap_list;
106 106
@@ -256,7 +256,7 @@ struct vgic_dist {
256 u64 propbaser; 256 u64 propbaser;
257 257
258 /* Protects the lpi_list and the count value below. */ 258 /* Protects the lpi_list and the count value below. */
259 spinlock_t lpi_list_lock; 259 raw_spinlock_t lpi_list_lock;
260 struct list_head lpi_list_head; 260 struct list_head lpi_list_head;
261 int lpi_list_count; 261 int lpi_list_count;
262 262
@@ -307,7 +307,7 @@ struct vgic_cpu {
307 unsigned int used_lrs; 307 unsigned int used_lrs;
308 struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; 308 struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
309 309
310 spinlock_t ap_list_lock; /* Protects the ap_list */ 310 raw_spinlock_t ap_list_lock; /* Protects the ap_list */
311 311
312 /* 312 /*
313 * List of IRQs that this VCPU should consider because they are either 313 * List of IRQs that this VCPU should consider because they are either
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c31157135598..07e02d6df5ad 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -190,6 +190,7 @@ struct backing_dev_info {
190 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ 190 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
191 struct rb_root cgwb_congested_tree; /* their congested states */ 191 struct rb_root cgwb_congested_tree; /* their congested states */
192 struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ 192 struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
193 struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
193#else 194#else
194 struct bdi_writeback_congested *wb_congested; 195 struct bdi_writeback_congested *wb_congested;
195#endif 196#endif
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 7cca5f859a90..f3c43519baa7 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -6,6 +6,7 @@
6 6
7struct bcma_soc { 7struct bcma_soc {
8 struct bcma_bus bus; 8 struct bcma_bus bus;
9 struct device *dev;
9}; 10};
10 11
11int __init bcma_host_soc_register(struct bcma_soc *soc); 12int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5c7e7f859a24..d66bf5f32610 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -287,7 +287,7 @@ enum req_opf {
287 REQ_OP_DISCARD = 3, 287 REQ_OP_DISCARD = 3,
288 /* securely erase sectors */ 288 /* securely erase sectors */
289 REQ_OP_SECURE_ERASE = 5, 289 REQ_OP_SECURE_ERASE = 5,
290 /* seset a zone write pointer */ 290 /* reset a zone write pointer */
291 REQ_OP_ZONE_RESET = 6, 291 REQ_OP_ZONE_RESET = 6,
292 /* write the same sector many times */ 292 /* write the same sector many times */
293 REQ_OP_WRITE_SAME = 7, 293 REQ_OP_WRITE_SAME = 7,
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 8804753805ac..7bb2d8de9f30 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
116 116
117static inline sector_t blk_rq_trace_sector(struct request *rq) 117static inline sector_t blk_rq_trace_sector(struct request *rq)
118{ 118{
119 return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq); 119 /*
120 * Tracing should ignore starting sector for passthrough requests and
121 * requests where starting sector didn't get set.
122 */
123 if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
124 return 0;
125 return blk_rq_pos(rq);
120} 126}
121 127
122static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) 128static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 27b74947cd2b..573cca00a0e6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -172,6 +172,7 @@ struct bpf_verifier_state_list {
172#define BPF_ALU_SANITIZE_SRC 1U 172#define BPF_ALU_SANITIZE_SRC 1U
173#define BPF_ALU_SANITIZE_DST 2U 173#define BPF_ALU_SANITIZE_DST 2U
174#define BPF_ALU_NEG_VALUE (1U << 2) 174#define BPF_ALU_NEG_VALUE (1U << 2)
175#define BPF_ALU_NON_POINTER (1U << 3)
175#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ 176#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
176 BPF_ALU_SANITIZE_DST) 177 BPF_ALU_SANITIZE_DST)
177 178
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index f02cee0225d4..d815622cd31e 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -3,13 +3,22 @@
3#define _LINUX_BPFILTER_H 3#define _LINUX_BPFILTER_H
4 4
5#include <uapi/linux/bpfilter.h> 5#include <uapi/linux/bpfilter.h>
6#include <linux/umh.h>
6 7
7struct sock; 8struct sock;
8int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, 9int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
9 unsigned int optlen); 10 unsigned int optlen);
10int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, 11int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
11 int __user *optlen); 12 int __user *optlen);
12extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, 13struct bpfilter_umh_ops {
13 char __user *optval, 14 struct umh_info info;
14 unsigned int optlen, bool is_set); 15 /* since ip_getsockopt() can run in parallel, serialize access to umh */
16 struct mutex lock;
17 int (*sockopt)(struct sock *sk, int optname,
18 char __user *optval,
19 unsigned int optlen, bool is_set);
20 int (*start)(void);
21 bool stop;
22};
23extern struct bpfilter_umh_ops bpfilter_ops;
15#endif 24#endif
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 68bb09c29ce8..a420c07904bc 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -35,6 +35,7 @@
35#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ 35#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
36#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ 36#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
37#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ 37#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
38#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */
38 39
39#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) 40#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
40 41
@@ -53,7 +54,7 @@ struct ceph_options {
53 unsigned long osd_request_timeout; /* jiffies */ 54 unsigned long osd_request_timeout; /* jiffies */
54 55
55 /* 56 /*
56 * any type that can't be simply compared or doesn't need need 57 * any type that can't be simply compared or doesn't need
57 * to be compared should go beyond this point, 58 * to be compared should go beyond this point,
58 * ceph_compare_options() should be updated accordingly 59 * ceph_compare_options() should be updated accordingly
59 */ 60 */
@@ -281,7 +282,8 @@ extern struct ceph_options *ceph_parse_options(char *options,
281 const char *dev_name, const char *dev_name_end, 282 const char *dev_name, const char *dev_name_end,
282 int (*parse_extra_token)(char *c, void *private), 283 int (*parse_extra_token)(char *c, void *private),
283 void *private); 284 void *private);
284int ceph_print_client_options(struct seq_file *m, struct ceph_client *client); 285int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
286 bool show_all);
285extern void ceph_destroy_options(struct ceph_options *opt); 287extern void ceph_destroy_options(struct ceph_options *opt);
286extern int ceph_compare_options(struct ceph_options *new_opt, 288extern int ceph_compare_options(struct ceph_options *new_opt,
287 struct ceph_client *client); 289 struct ceph_client *client);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7a2af5034278..2294f963dab7 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -354,7 +354,6 @@ struct ceph_osd_client {
354 struct rb_root linger_map_checks; 354 struct rb_root linger_map_checks;
355 atomic_t num_requests; 355 atomic_t num_requests;
356 atomic_t num_homeless; 356 atomic_t num_homeless;
357 bool abort_on_full; /* abort w/ ENOSPC when full */
358 int abort_err; 357 int abort_err;
359 struct delayed_work timeout_work; 358 struct delayed_work timeout_work;
360 struct delayed_work osds_timeout_work; 359 struct delayed_work osds_timeout_work;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index e443fa9fa859..b7cf80a71293 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -792,6 +792,9 @@ unsigned int __clk_get_enable_count(struct clk *clk);
792unsigned long clk_hw_get_rate(const struct clk_hw *hw); 792unsigned long clk_hw_get_rate(const struct clk_hw *hw);
793unsigned long __clk_get_flags(struct clk *clk); 793unsigned long __clk_get_flags(struct clk *clk);
794unsigned long clk_hw_get_flags(const struct clk_hw *hw); 794unsigned long clk_hw_get_flags(const struct clk_hw *hw);
795#define clk_hw_can_set_rate_parent(hw) \
796 (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
797
795bool clk_hw_is_prepared(const struct clk_hw *hw); 798bool clk_hw_is_prepared(const struct clk_hw *hw);
796bool clk_hw_rate_is_protected(const struct clk_hw *hw); 799bool clk_hw_rate_is_protected(const struct clk_hw *hw);
797bool clk_hw_is_enabled(const struct clk_hw *hw); 800bool clk_hw_is_enabled(const struct clk_hw *hw);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index a7773b5c0b9f..d8bc1a856b39 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -384,6 +384,17 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
384struct clk *devm_clk_get(struct device *dev, const char *id); 384struct clk *devm_clk_get(struct device *dev, const char *id);
385 385
386/** 386/**
387 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
388 * clock producer.
389 * @dev: device for clock "consumer"
390 * @id: clock consumer ID
391 *
392 * Behaves the same as devm_clk_get() except where there is no clock producer.
393 * In this case, instead of returning -ENOENT, the function returns NULL.
394 */
395struct clk *devm_clk_get_optional(struct device *dev, const char *id);
396
397/**
387 * devm_get_clk_from_child - lookup and obtain a managed reference to a 398 * devm_get_clk_from_child - lookup and obtain a managed reference to a
388 * clock producer from child node. 399 * clock producer from child node.
389 * @dev: device for clock "consumer" 400 * @dev: device for clock "consumer"
@@ -718,6 +729,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
718 return NULL; 729 return NULL;
719} 730}
720 731
732static inline struct clk *devm_clk_get_optional(struct device *dev,
733 const char *id)
734{
735 return NULL;
736}
737
721static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, 738static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
722 struct clk_bulk_data *clks) 739 struct clk_bulk_data *clks)
723{ 740{
@@ -862,6 +879,25 @@ static inline void clk_bulk_disable_unprepare(int num_clks,
862 clk_bulk_unprepare(num_clks, clks); 879 clk_bulk_unprepare(num_clks, clks);
863} 880}
864 881
882/**
883 * clk_get_optional - lookup and obtain a reference to an optional clock
884 * producer.
885 * @dev: device for clock "consumer"
886 * @id: clock consumer ID
887 *
888 * Behaves the same as clk_get() except where there is no clock producer. In
889 * this case, instead of returning -ENOENT, the function returns NULL.
890 */
891static inline struct clk *clk_get_optional(struct device *dev, const char *id)
892{
893 struct clk *clk = clk_get(dev, id);
894
895 if (clk == ERR_PTR(-ENOENT))
896 return NULL;
897
898 return clk;
899}
900
865#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) 901#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
866struct clk *of_clk_get(struct device_node *np, int index); 902struct clk *of_clk_get(struct device_node *np, int index);
867struct clk *of_clk_get_by_name(struct device_node *np, const char *name); 903struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index eacc5df57b99..78872efc7be0 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -160,6 +160,7 @@ struct clk_hw_omap {
160 struct clockdomain *clkdm; 160 struct clockdomain *clkdm;
161 const struct clk_hw_omap_ops *ops; 161 const struct clk_hw_omap_ops *ops;
162 u32 context; 162 u32 context;
163 int autoidle_count;
163}; 164};
164 165
165/* 166/*
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 4890ff033220..ccb32af5848b 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -52,4 +52,8 @@ int clk_add_alias(const char *, const char *, const char *, struct device *);
52int clk_register_clkdev(struct clk *, const char *, const char *); 52int clk_register_clkdev(struct clk *, const char *, const char *);
53int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); 53int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
54 54
55int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
56 const char *con_id, const char *dev_id);
57void devm_clk_release_clkdev(struct device *dev, const char *con_id,
58 const char *dev_id);
55#endif 59#endif
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 39f668d5066b..333a6695a918 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -3,9 +3,8 @@
3#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." 3#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
4#endif 4#endif
5 5
6/* Some compiler specific definitions are overwritten here 6/* Compiler specific definitions for Clang compiler */
7 * for Clang compiler 7
8 */
9#define uninitialized_var(x) x = *(&(x)) 8#define uninitialized_var(x) x = *(&(x))
10 9
11/* same as gcc, this was present in clang-2.6 so we can assume it works 10/* same as gcc, this was present in clang-2.6 so we can assume it works
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 5776da43da97..e8579412ad21 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -58,17 +58,13 @@
58 (typeof(ptr)) (__ptr + (off)); \ 58 (typeof(ptr)) (__ptr + (off)); \
59}) 59})
60 60
61/* Make the optimizer believe the variable can be manipulated arbitrarily. */
62#define OPTIMIZER_HIDE_VAR(var) \
63 __asm__ ("" : "=r" (var) : "0" (var))
64
65/* 61/*
66 * A trick to suppress uninitialized variable warning without generating any 62 * A trick to suppress uninitialized variable warning without generating any
67 * code 63 * code
68 */ 64 */
69#define uninitialized_var(x) x = x 65#define uninitialized_var(x) x = x
70 66
71#ifdef RETPOLINE 67#ifdef CONFIG_RETPOLINE
72#define __noretpoline __attribute__((__indirect_branch__("keep"))) 68#define __noretpoline __attribute__((__indirect_branch__("keep")))
73#endif 69#endif
74 70
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 517bd14e1222..b17f3cd18334 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -5,9 +5,7 @@
5 5
6#ifdef __ECC 6#ifdef __ECC
7 7
8/* Some compiler specific definitions are overwritten here 8/* Compiler specific definitions for Intel ECC compiler */
9 * for Intel ECC compiler
10 */
11 9
12#include <asm/intrinsics.h> 10#include <asm/intrinsics.h>
13 11
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index fc5004a4b07d..445348facea9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
161#endif 161#endif
162 162
163#ifndef OPTIMIZER_HIDE_VAR 163#ifndef OPTIMIZER_HIDE_VAR
164#define OPTIMIZER_HIDE_VAR(var) barrier() 164/* Make the optimizer believe the variable can be manipulated arbitrarily. */
165#define OPTIMIZER_HIDE_VAR(var) \
166 __asm__ ("" : "=r" (var) : "0" (var))
165#endif 167#endif
166 168
167/* Not-quite-unique ID. */ 169/* Not-quite-unique ID. */
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 19f32b0c29af..6b318efd8a74 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -34,6 +34,7 @@
34#ifndef __has_attribute 34#ifndef __has_attribute
35# define __has_attribute(x) __GCC4_has_attribute_##x 35# define __has_attribute(x) __GCC4_has_attribute_##x
36# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) 36# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
37# define __GCC4_has_attribute___copy__ 0
37# define __GCC4_has_attribute___designated_init__ 0 38# define __GCC4_has_attribute___designated_init__ 0
38# define __GCC4_has_attribute___externally_visible__ 1 39# define __GCC4_has_attribute___externally_visible__ 1
39# define __GCC4_has_attribute___noclone__ 1 40# define __GCC4_has_attribute___noclone__ 1
@@ -101,6 +102,19 @@
101#define __attribute_const__ __attribute__((__const__)) 102#define __attribute_const__ __attribute__((__const__))
102 103
103/* 104/*
105 * Optional: only supported since gcc >= 9
106 * Optional: not supported by clang
107 * Optional: not supported by icc
108 *
109 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
110 */
111#if __has_attribute(__copy__)
112# define __copy(symbol) __attribute__((__copy__(symbol)))
113#else
114# define __copy(symbol)
115#endif
116
117/*
104 * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' 118 * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
105 * attribute warnings entirely and for good") for more information. 119 * attribute warnings entirely and for good") for more information.
106 * 120 *
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 218df7f4d3e1..5041357d0297 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
180#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) 180#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
181extern enum cpuhp_smt_control cpu_smt_control; 181extern enum cpuhp_smt_control cpu_smt_control;
182extern void cpu_smt_disable(bool force); 182extern void cpu_smt_disable(bool force);
183extern void cpu_smt_check_topology_early(void);
184extern void cpu_smt_check_topology(void); 183extern void cpu_smt_check_topology(void);
185#else 184#else
186# define cpu_smt_control (CPU_SMT_ENABLED) 185# define cpu_smt_control (CPU_SMT_ENABLED)
187static inline void cpu_smt_disable(bool force) { } 186static inline void cpu_smt_disable(bool force) { }
188static inline void cpu_smt_check_topology_early(void) { }
189static inline void cpu_smt_check_topology(void) { } 187static inline void cpu_smt_check_topology(void) { }
190#endif 188#endif
191 189
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index ef4b70f64f33..60996e64c579 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -62,9 +62,10 @@ extern const struct qstr slash_name;
62struct dentry_stat_t { 62struct dentry_stat_t {
63 long nr_dentry; 63 long nr_dentry;
64 long nr_unused; 64 long nr_unused;
65 long age_limit; /* age in seconds */ 65 long age_limit; /* age in seconds */
66 long want_pages; /* pages requested by system */ 66 long want_pages; /* pages requested by system */
67 long dummy[2]; 67 long nr_negative; /* # of unused negative dentries */
68 long dummy; /* Reserved for future use */
68}; 69};
69extern struct dentry_stat_t dentry_stat; 70extern struct dentry_stat_t dentry_stat;
70 71
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index cef2127e1d70..f6ded992c183 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -717,15 +717,6 @@ static inline unsigned long dma_max_pfn(struct device *dev)
717} 717}
718#endif 718#endif
719 719
720/*
721 * Please always use dma_alloc_coherent instead as it already zeroes the memory!
722 */
723static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
724 dma_addr_t *dma_handle, gfp_t flag)
725{
726 return dma_alloc_coherent(dev, size, dma_handle, flag);
727}
728
729static inline int dma_get_cache_alignment(void) 720static inline int dma_get_cache_alignment(void)
730{ 721{
731#ifdef ARCH_DMA_MINALIGN 722#ifdef ARCH_DMA_MINALIGN
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 45ff763fba76..28604a8d0aa9 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1198,8 +1198,6 @@ static inline bool efi_enabled(int feature)
1198extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); 1198extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
1199 1199
1200extern bool efi_is_table_address(unsigned long phys_addr); 1200extern bool efi_is_table_address(unsigned long phys_addr);
1201
1202extern int efi_apply_persistent_mem_reservations(void);
1203#else 1201#else
1204static inline bool efi_enabled(int feature) 1202static inline bool efi_enabled(int feature)
1205{ 1203{
@@ -1218,11 +1216,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
1218{ 1216{
1219 return false; 1217 return false;
1220} 1218}
1221
1222static inline int efi_apply_persistent_mem_reservations(void)
1223{
1224 return 0;
1225}
1226#endif 1219#endif
1227 1220
1228extern int efi_status_to_err(efi_status_t status); 1221extern int efi_status_to_err(efi_status_t status);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 7cdd31a69719..f52ef0ad6781 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info);
653 653
654extern struct fb_info *registered_fb[FB_MAX]; 654extern struct fb_info *registered_fb[FB_MAX];
655extern int num_registered_fb; 655extern int num_registered_fb;
656extern bool fb_center_logo;
656extern struct class *fb_class; 657extern struct class *fb_class;
657 658
658#define for_each_registered_fb(i) \ 659#define for_each_registered_fb(i) \
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ad106d845b22..e532fcc6e4b5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
591 return qdisc_skb_cb(skb)->data; 591 return qdisc_skb_cb(skb)->data;
592} 592}
593 593
594static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, 594static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
595 struct sk_buff *skb) 595 struct sk_buff *skb)
596{ 596{
597 u8 *cb_data = bpf_skb_cb(skb); 597 u8 *cb_data = bpf_skb_cb(skb);
598 u8 cb_saved[BPF_SKB_CB_LEN]; 598 u8 cb_saved[BPF_SKB_CB_LEN];
@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
611 return res; 611 return res;
612} 612}
613 613
614static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
615 struct sk_buff *skb)
616{
617 u32 res;
618
619 preempt_disable();
620 res = __bpf_prog_run_save_cb(prog, skb);
621 preempt_enable();
622 return res;
623}
624
614static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, 625static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
615 struct sk_buff *skb) 626 struct sk_buff *skb)
616{ 627{
617 u8 *cb_data = bpf_skb_cb(skb); 628 u8 *cb_data = bpf_skb_cb(skb);
629 u32 res;
618 630
619 if (unlikely(prog->cb_access)) 631 if (unlikely(prog->cb_access))
620 memset(cb_data, 0, BPF_SKB_CB_LEN); 632 memset(cb_data, 0, BPF_SKB_CB_LEN);
621 633
622 return BPF_PROG_RUN(prog, skb); 634 preempt_disable();
635 res = BPF_PROG_RUN(prog, skb);
636 preempt_enable();
637 return res;
623} 638}
624 639
625static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, 640static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 811c77743dad..29d8e2cfed0e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1479,11 +1479,12 @@ struct super_block {
1479 struct user_namespace *s_user_ns; 1479 struct user_namespace *s_user_ns;
1480 1480
1481 /* 1481 /*
1482 * Keep the lru lists last in the structure so they always sit on their 1482 * The list_lru structure is essentially just a pointer to a table
1483 * own individual cachelines. 1483 * of per-node lru lists, each of which has its own spinlock.
1484 * There is no need to put them into separate cachelines.
1484 */ 1485 */
1485 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; 1486 struct list_lru s_dentry_lru;
1486 struct list_lru s_inode_lru ____cacheline_aligned_in_smp; 1487 struct list_lru s_inode_lru;
1487 struct rcu_head rcu; 1488 struct rcu_head rcu;
1488 struct work_struct destroy_work; 1489 struct work_struct destroy_work;
1489 1490
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8663f216c563..2d6100edf204 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -24,7 +24,10 @@
24 24
25#ifdef CONFIG_DEBUG_FS 25#ifdef CONFIG_DEBUG_FS
26 26
27#include <linux/kfifo.h>
28
27#define HID_DEBUG_BUFSIZE 512 29#define HID_DEBUG_BUFSIZE 512
30#define HID_DEBUG_FIFOSIZE 512
28 31
29void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); 32void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
30void hid_dump_report(struct hid_device *, int , u8 *, int); 33void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +40,8 @@ void hid_debug_init(void);
37void hid_debug_exit(void); 40void hid_debug_exit(void);
38void hid_debug_event(struct hid_device *, char *); 41void hid_debug_event(struct hid_device *, char *);
39 42
40
41struct hid_debug_list { 43struct hid_debug_list {
42 char *hid_debug_buf; 44 DECLARE_KFIFO_PTR(hid_debug_fifo, char);
43 int head;
44 int tail;
45 struct fasync_struct *fasync; 45 struct fasync_struct *fasync;
46 struct hid_device *hdev; 46 struct hid_device *hdev;
47 struct list_head node; 47 struct list_head node;
@@ -64,4 +64,3 @@ struct hid_debug_list {
64#endif 64#endif
65 65
66#endif 66#endif
67
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d99287327ef2..f9707d1dcb58 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -430,7 +430,7 @@ struct hid_local {
430 */ 430 */
431 431
432struct hid_collection { 432struct hid_collection {
433 struct hid_collection *parent; 433 int parent_idx; /* device->collection */
434 unsigned type; 434 unsigned type;
435 unsigned usage; 435 unsigned usage;
436 unsigned level; 436 unsigned level;
@@ -658,7 +658,6 @@ struct hid_parser {
658 unsigned int *collection_stack; 658 unsigned int *collection_stack;
659 unsigned int collection_stack_ptr; 659 unsigned int collection_stack_ptr;
660 unsigned int collection_stack_size; 660 unsigned int collection_stack_size;
661 struct hid_collection *active_collection;
662 struct hid_device *device; 661 struct hid_device *device;
663 unsigned int scan_flags; 662 unsigned int scan_flags;
664}; 663};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f0885cc01db6..dcb6977afce9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1159,8 +1159,9 @@ struct hv_ring_buffer_debug_info {
1159 u32 bytes_avail_towrite; 1159 u32 bytes_avail_towrite;
1160}; 1160};
1161 1161
1162void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, 1162
1163 struct hv_ring_buffer_debug_info *debug_info); 1163int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
1164 struct hv_ring_buffer_debug_info *debug_info);
1164 1165
1165/* Vmbus interface */ 1166/* Vmbus interface */
1166#define vmbus_driver_register(driver) \ 1167#define vmbus_driver_register(driver) \
diff --git a/include/linux/ide.h b/include/linux/ide.h
index e7d29ae633cd..971cf76a78a0 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -615,6 +615,7 @@ struct ide_drive_s {
615 615
616 /* current sense rq and buffer */ 616 /* current sense rq and buffer */
617 bool sense_rq_armed; 617 bool sense_rq_armed;
618 bool sense_rq_active;
618 struct request *sense_rq; 619 struct request *sense_rq;
619 struct request_sense sense_data; 620 struct request_sense sense_data;
620 621
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
1219extern void ide_timer_expiry(struct timer_list *t); 1220extern void ide_timer_expiry(struct timer_list *t);
1220extern irqreturn_t ide_intr(int irq, void *dev_id); 1221extern irqreturn_t ide_intr(int irq, void *dev_id);
1221extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); 1222extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
1223extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
1222extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); 1224extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
1223 1225
1224void ide_init_disk(struct gendisk *, ide_drive_t *); 1226void ide_init_disk(struct gendisk *, ide_drive_t *);
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 6756fea18b69..e44746de95cd 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
54 case ARPHRD_IPGRE: 54 case ARPHRD_IPGRE:
55 case ARPHRD_VOID: 55 case ARPHRD_VOID:
56 case ARPHRD_NONE: 56 case ARPHRD_NONE:
57 case ARPHRD_RAWIP:
57 return false; 58 return false;
58 default: 59 default:
59 return true; 60 return true;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c672f34235e7..4a728dba02e2 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -260,6 +260,7 @@ struct irq_affinity {
260/** 260/**
261 * struct irq_affinity_desc - Interrupt affinity descriptor 261 * struct irq_affinity_desc - Interrupt affinity descriptor
262 * @mask: cpumask to hold the affinity assignment 262 * @mask: cpumask to hold the affinity assignment
263 * @is_managed: 1 if the interrupt is managed internally
263 */ 264 */
264struct irq_affinity_desc { 265struct irq_affinity_desc {
265 struct cpumask mask; 266 struct cpumask mask;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 071b4cbdf010..c848a7cc502e 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -319,7 +319,7 @@
319#define GITS_TYPER_PLPIS (1UL << 0) 319#define GITS_TYPER_PLPIS (1UL << 0)
320#define GITS_TYPER_VLPIS (1UL << 1) 320#define GITS_TYPER_VLPIS (1UL << 1)
321#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 321#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
322#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1) 322#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
323#define GITS_TYPER_IDBITS_SHIFT 8 323#define GITS_TYPER_IDBITS_SHIFT 8
324#define GITS_TYPER_DEVBITS_SHIFT 13 324#define GITS_TYPER_DEVBITS_SHIFT 13
325#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) 325#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index bc9af551fc83..e49d1de0614e 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -21,15 +21,6 @@ struct kernel_pkey_query;
21struct kernel_pkey_params; 21struct kernel_pkey_params;
22 22
23/* 23/*
24 * key under-construction record
25 * - passed to the request_key actor if supplied
26 */
27struct key_construction {
28 struct key *key; /* key being constructed */
29 struct key *authkey;/* authorisation for key being constructed */
30};
31
32/*
33 * Pre-parsed payload, used by key add, update and instantiate. 24 * Pre-parsed payload, used by key add, update and instantiate.
34 * 25 *
35 * This struct will be cleared and data and datalen will be set with the data 26 * This struct will be cleared and data and datalen will be set with the data
@@ -50,8 +41,7 @@ struct key_preparsed_payload {
50 time64_t expiry; /* Expiry time of key */ 41 time64_t expiry; /* Expiry time of key */
51} __randomize_layout; 42} __randomize_layout;
52 43
53typedef int (*request_key_actor_t)(struct key_construction *key, 44typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
54 const char *op, void *aux);
55 45
56/* 46/*
57 * Preparsed matching criterion. 47 * Preparsed matching criterion.
@@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key,
181 const void *data, 171 const void *data,
182 size_t datalen, 172 size_t datalen,
183 struct key *keyring, 173 struct key *keyring,
184 struct key *instkey); 174 struct key *authkey);
185extern int key_reject_and_link(struct key *key, 175extern int key_reject_and_link(struct key *key,
186 unsigned timeout, 176 unsigned timeout,
187 unsigned error, 177 unsigned error,
188 struct key *keyring, 178 struct key *keyring,
189 struct key *instkey); 179 struct key *authkey);
190extern void complete_request_key(struct key_construction *cons, int error); 180extern void complete_request_key(struct key *authkey, int error);
191 181
192static inline int key_negate_and_link(struct key *key, 182static inline int key_negate_and_link(struct key *key,
193 unsigned timeout, 183 unsigned timeout,
194 struct key *keyring, 184 struct key *keyring,
195 struct key *instkey) 185 struct key *authkey)
196{ 186{
197 return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); 187 return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
198} 188}
199 189
200extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); 190extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 5440f11b0907..ad609617aeb8 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -160,6 +160,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
160} 160}
161 161
162enum nvdimm_security_state { 162enum nvdimm_security_state {
163 NVDIMM_SECURITY_ERROR = -1,
163 NVDIMM_SECURITY_DISABLED, 164 NVDIMM_SECURITY_DISABLED,
164 NVDIMM_SECURITY_UNLOCKED, 165 NVDIMM_SECURITY_UNLOCKED,
165 NVDIMM_SECURITY_LOCKED, 166 NVDIMM_SECURITY_LOCKED,
@@ -234,7 +235,6 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
234 cmd_mask, num_flush, flush_wpq, NULL, NULL); 235 cmd_mask, num_flush, flush_wpq, NULL, NULL);
235} 236}
236 237
237int nvdimm_security_setup_events(struct nvdimm *nvdimm);
238const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); 238const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
239const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); 239const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
240u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, 240u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 64c41cf45590..859b55b66db2 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -29,9 +29,6 @@ extern unsigned long max_pfn;
29 */ 29 */
30extern unsigned long long max_possible_pfn; 30extern unsigned long long max_possible_pfn;
31 31
32#define INIT_MEMBLOCK_REGIONS 128
33#define INIT_PHYSMEM_REGIONS 4
34
35/** 32/**
36 * enum memblock_flags - definition of memory region attributes 33 * enum memblock_flags - definition of memory region attributes
37 * @MEMBLOCK_NONE: no special request 34 * @MEMBLOCK_NONE: no special request
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 07da5c6c5ba0..368267c1b71b 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -21,14 +21,16 @@ struct vmem_altmap;
21 * walkers which rely on the fully initialized page->flags and others 21 * walkers which rely on the fully initialized page->flags and others
22 * should use this rather than pfn_valid && pfn_to_page 22 * should use this rather than pfn_valid && pfn_to_page
23 */ 23 */
24#define pfn_to_online_page(pfn) \ 24#define pfn_to_online_page(pfn) \
25({ \ 25({ \
26 struct page *___page = NULL; \ 26 struct page *___page = NULL; \
27 unsigned long ___nr = pfn_to_section_nr(pfn); \ 27 unsigned long ___pfn = pfn; \
28 \ 28 unsigned long ___nr = pfn_to_section_nr(___pfn); \
29 if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\ 29 \
30 ___page = pfn_to_page(pfn); \ 30 if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
31 ___page; \ 31 pfn_valid_within(___pfn)) \
32 ___page = pfn_to_page(___pfn); \
33 ___page; \
32}) 34})
33 35
34/* 36/*
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 9a9631f0559e..fc91082d4c35 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2791,6 +2791,100 @@ struct ec_response_battery_vendor_param {
2791} __packed; 2791} __packed;
2792 2792
2793/*****************************************************************************/ 2793/*****************************************************************************/
2794/* Commands for I2S recording on audio codec. */
2795
2796#define EC_CMD_CODEC_I2S 0x00BC
2797
2798enum ec_codec_i2s_subcmd {
2799 EC_CODEC_SET_SAMPLE_DEPTH = 0x0,
2800 EC_CODEC_SET_GAIN = 0x1,
2801 EC_CODEC_GET_GAIN = 0x2,
2802 EC_CODEC_I2S_ENABLE = 0x3,
2803 EC_CODEC_I2S_SET_CONFIG = 0x4,
2804 EC_CODEC_I2S_SET_TDM_CONFIG = 0x5,
2805 EC_CODEC_I2S_SET_BCLK = 0x6,
2806};
2807
2808enum ec_sample_depth_value {
2809 EC_CODEC_SAMPLE_DEPTH_16 = 0,
2810 EC_CODEC_SAMPLE_DEPTH_24 = 1,
2811};
2812
2813enum ec_i2s_config {
2814 EC_DAI_FMT_I2S = 0,
2815 EC_DAI_FMT_RIGHT_J = 1,
2816 EC_DAI_FMT_LEFT_J = 2,
2817 EC_DAI_FMT_PCM_A = 3,
2818 EC_DAI_FMT_PCM_B = 4,
2819 EC_DAI_FMT_PCM_TDM = 5,
2820};
2821
2822struct ec_param_codec_i2s {
2823 /*
2824 * enum ec_codec_i2s_subcmd
2825 */
2826 uint8_t cmd;
2827 union {
2828 /*
2829 * EC_CODEC_SET_SAMPLE_DEPTH
2830 * Value should be one of ec_sample_depth_value.
2831 */
2832 uint8_t depth;
2833
2834 /*
2835 * EC_CODEC_SET_GAIN
2836 * Value should be 0~43 for both channels.
2837 */
2838 struct ec_param_codec_i2s_set_gain {
2839 uint8_t left;
2840 uint8_t right;
2841 } __packed gain;
2842
2843 /*
2844 * EC_CODEC_I2S_ENABLE
2845 * 1 to enable, 0 to disable.
2846 */
2847 uint8_t i2s_enable;
2848
2849 /*
2850 * EC_CODEC_I2S_SET_COFNIG
2851 * Value should be one of ec_i2s_config.
2852 */
2853 uint8_t i2s_config;
2854
2855 /*
2856 * EC_CODEC_I2S_SET_TDM_CONFIG
2857 * Value should be one of ec_i2s_config.
2858 */
2859 struct ec_param_codec_i2s_tdm {
2860 /*
2861 * 0 to 496
2862 */
2863 int16_t ch0_delay;
2864 /*
2865 * -1 to 496
2866 */
2867 int16_t ch1_delay;
2868 uint8_t adjacent_to_ch0;
2869 uint8_t adjacent_to_ch1;
2870 } __packed tdm_param;
2871
2872 /*
2873 * EC_CODEC_I2S_SET_BCLK
2874 */
2875 uint32_t bclk;
2876 };
2877} __packed;
2878
2879/*
2880 * For subcommand EC_CODEC_GET_GAIN.
2881 */
2882struct ec_response_codec_gain {
2883 uint8_t left;
2884 uint8_t right;
2885} __packed;
2886
2887/*****************************************************************************/
2794/* System commands */ 2888/* System commands */
2795 2889
2796/* 2890/*
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h
index ab16ad283def..2083fa20821d 100644
--- a/include/linux/mfd/ingenic-tcu.h
+++ b/include/linux/mfd/ingenic-tcu.h
@@ -41,7 +41,7 @@
41#define TCU_TCSR_PRESCALE_LSB 3 41#define TCU_TCSR_PRESCALE_LSB 3
42#define TCU_TCSR_PRESCALE_MASK 0x38 42#define TCU_TCSR_PRESCALE_MASK 0x38
43 43
44#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */ 44#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown gracefully 1: abruptly */
45#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */ 45#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
46#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */ 46#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
47 47
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index fe69c0f4398f..4d5d51a9c8a6 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -15,6 +15,7 @@
15#include <linux/gpio/consumer.h> 15#include <linux/gpio/consumer.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/mfd/madera/pdata.h> 17#include <linux/mfd/madera/pdata.h>
18#include <linux/mutex.h>
18#include <linux/notifier.h> 19#include <linux/notifier.h>
19#include <linux/regmap.h> 20#include <linux/regmap.h>
20#include <linux/regulator/consumer.h> 21#include <linux/regulator/consumer.h>
@@ -37,6 +38,8 @@ enum madera_type {
37 38
38#define MADERA_MAX_MICBIAS 4 39#define MADERA_MAX_MICBIAS 4
39 40
41#define MADERA_MAX_HP_OUTPUT 3
42
40/* Notifier events */ 43/* Notifier events */
41#define MADERA_NOTIFY_VOICE_TRIGGER 0x1 44#define MADERA_NOTIFY_VOICE_TRIGGER 0x1
42#define MADERA_NOTIFY_HPDET 0x2 45#define MADERA_NOTIFY_HPDET 0x2
@@ -183,6 +186,10 @@ struct madera {
183 unsigned int num_childbias[MADERA_MAX_MICBIAS]; 186 unsigned int num_childbias[MADERA_MAX_MICBIAS];
184 187
185 struct snd_soc_dapm_context *dapm; 188 struct snd_soc_dapm_context *dapm;
189 struct mutex dapm_ptr_lock;
190 unsigned int hp_ena;
191 bool out_clamp[MADERA_MAX_HP_OUTPUT];
192 bool out_shorted[MADERA_MAX_HP_OUTPUT];
186 193
187 struct blocking_notifier_head notifier; 194 struct blocking_notifier_head notifier;
188}; 195};
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index b9a53e013bff..483168403ae5 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -78,6 +78,8 @@
78#define STEPCONFIG_YNN BIT(8) 78#define STEPCONFIG_YNN BIT(8)
79#define STEPCONFIG_XNP BIT(9) 79#define STEPCONFIG_XNP BIT(9)
80#define STEPCONFIG_YPN BIT(10) 80#define STEPCONFIG_YPN BIT(10)
81#define STEPCONFIG_RFP(val) ((val) << 12)
82#define STEPCONFIG_RFP_VREFP (0x3 << 12)
81#define STEPCONFIG_INM_MASK (0xF << 15) 83#define STEPCONFIG_INM_MASK (0xF << 15)
82#define STEPCONFIG_INM(val) ((val) << 15) 84#define STEPCONFIG_INM(val) ((val) << 15)
83#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) 85#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
@@ -86,6 +88,8 @@
86#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) 88#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
87#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) 89#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
88#define STEPCONFIG_FIFO1 BIT(26) 90#define STEPCONFIG_FIFO1 BIT(26)
91#define STEPCONFIG_RFM(val) ((val) << 23)
92#define STEPCONFIG_RFM_VREFN (0x3 << 23)
89 93
90/* Delay register */ 94/* Delay register */
91#define STEPDELAY_OPEN_MASK (0x3FFFF << 0) 95#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index e2687a30e5a1..739b7bf37eaa 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -79,7 +79,7 @@
79/* Some controllers have a CBSY bit */ 79/* Some controllers have a CBSY bit */
80#define TMIO_MMC_HAVE_CBSY BIT(11) 80#define TMIO_MMC_HAVE_CBSY BIT(11)
81 81
82/* Some controllers that support HS400 use use 4 taps while others use 8. */ 82/* Some controllers that support HS400 use 4 taps while others use 8. */
83#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) 83#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13)
84 84
85int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); 85int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index de7377815b6b..8ef330027b13 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -308,6 +308,7 @@ struct mmc_card {
308 unsigned int nr_parts; 308 unsigned int nr_parts;
309 309
310 unsigned int bouncesz; /* Bounce buffer size */ 310 unsigned int bouncesz; /* Bounce buffer size */
311 struct workqueue_struct *complete_wq; /* Private workqueue */
311}; 312};
312 313
313static inline bool mmc_large_sector(struct mmc_card *card) 314static inline bool mmc_large_sector(struct mmc_card *card)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cc4a507d7ca4..842f9189537b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -520,6 +520,12 @@ enum pgdat_flags {
520 PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ 520 PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
521}; 521};
522 522
523enum zone_flags {
524 ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
525 * Cleared when kswapd is woken.
526 */
527};
528
523static inline unsigned long zone_managed_pages(struct zone *zone) 529static inline unsigned long zone_managed_pages(struct zone *zone)
524{ 530{
525 return (unsigned long)atomic_long_read(&zone->managed_pages); 531 return (unsigned long)atomic_long_read(&zone->managed_pages);
diff --git a/include/linux/module.h b/include/linux/module.h
index 9a21fe3509af..f5bc4c046461 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -129,13 +129,13 @@ extern void cleanup_module(void);
129#define module_init(initfn) \ 129#define module_init(initfn) \
130 static inline initcall_t __maybe_unused __inittest(void) \ 130 static inline initcall_t __maybe_unused __inittest(void) \
131 { return initfn; } \ 131 { return initfn; } \
132 int init_module(void) __attribute__((alias(#initfn))); 132 int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
133 133
134/* This is only required if you want to be unloadable. */ 134/* This is only required if you want to be unloadable. */
135#define module_exit(exitfn) \ 135#define module_exit(exitfn) \
136 static inline exitcall_t __maybe_unused __exittest(void) \ 136 static inline exitcall_t __maybe_unused __exittest(void) \
137 { return exitfn; } \ 137 { return exitfn; } \
138 void cleanup_module(void) __attribute__((alias(#exitfn))); 138 void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
139 139
140#endif 140#endif
141 141
@@ -828,7 +828,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
828static inline void module_bug_cleanup(struct module *mod) {} 828static inline void module_bug_cleanup(struct module *mod) {}
829#endif /* CONFIG_GENERIC_BUG */ 829#endif /* CONFIG_GENERIC_BUG */
830 830
831#ifdef RETPOLINE 831#ifdef CONFIG_RETPOLINE
832extern bool retpoline_module_ok(bool has_retpoline); 832extern bool retpoline_module_ok(bool has_retpoline);
833#else 833#else
834static inline bool retpoline_module_ok(bool has_retpoline) 834static inline bool retpoline_module_ok(bool has_retpoline)
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 2b2a6dce1630..4c76fe2c8488 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -11,6 +11,8 @@
11#define _LINUX_NETDEV_FEATURES_H 11#define _LINUX_NETDEV_FEATURES_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/bitops.h>
15#include <asm/byteorder.h>
14 16
15typedef u64 netdev_features_t; 17typedef u64 netdev_features_t;
16 18
@@ -154,8 +156,26 @@ enum {
154#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) 156#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
155#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) 157#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
156 158
157#define for_each_netdev_feature(mask_addr, bit) \ 159/* Finds the next feature with the highest number of the range of start till 0.
158 for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) 160 */
161static inline int find_next_netdev_feature(u64 feature, unsigned long start)
162{
163 /* like BITMAP_LAST_WORD_MASK() for u64
164 * this sets the most significant 64 - start to 0.
165 */
166 feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
167
168 return fls64(feature) - 1;
169}
170
171/* This goes for the MSB to the LSB through the set feature bits,
172 * mask_addr should be a u64 and bit an int
173 */
174#define for_each_netdev_feature(mask_addr, bit) \
175 for ((bit) = find_next_netdev_feature((mask_addr), \
176 NETDEV_FEATURE_COUNT); \
177 (bit) >= 0; \
178 (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
159 179
160/* Features valid for ethtool to change */ 180/* Features valid for ethtool to change */
161/* = all defined minus driver/device-class-related */ 181/* = all defined minus driver/device-class-related */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1377d085ef99..86dbb3e29139 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1483,6 +1483,7 @@ struct net_device_ops {
1483 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook 1483 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
1484 * @IFF_FAILOVER: device is a failover master device 1484 * @IFF_FAILOVER: device is a failover master device
1485 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device 1485 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
1486 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
1486 */ 1487 */
1487enum netdev_priv_flags { 1488enum netdev_priv_flags {
1488 IFF_802_1Q_VLAN = 1<<0, 1489 IFF_802_1Q_VLAN = 1<<0,
@@ -1514,6 +1515,7 @@ enum netdev_priv_flags {
1514 IFF_NO_RX_HANDLER = 1<<26, 1515 IFF_NO_RX_HANDLER = 1<<26,
1515 IFF_FAILOVER = 1<<27, 1516 IFF_FAILOVER = 1<<27,
1516 IFF_FAILOVER_SLAVE = 1<<28, 1517 IFF_FAILOVER_SLAVE = 1<<28,
1518 IFF_L3MDEV_RX_HANDLER = 1<<29,
1517}; 1519};
1518 1520
1519#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1521#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1544,6 +1546,7 @@ enum netdev_priv_flags {
1544#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER 1546#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
1545#define IFF_FAILOVER IFF_FAILOVER 1547#define IFF_FAILOVER IFF_FAILOVER
1546#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE 1548#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
1549#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
1547 1550
1548/** 1551/**
1549 * struct net_device - The DEVICE structure. 1552 * struct net_device - The DEVICE structure.
@@ -4549,6 +4552,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
4549 return dev->priv_flags & IFF_SUPP_NOFCS; 4552 return dev->priv_flags & IFF_SUPP_NOFCS;
4550} 4553}
4551 4554
4555static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
4556{
4557 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
4558}
4559
4552static inline bool netif_is_l3_master(const struct net_device *dev) 4560static inline bool netif_is_l3_master(const struct net_device *dev)
4553{ 4561{
4554 return dev->priv_flags & IFF_L3MDEV_MASTER; 4562 return dev->priv_flags & IFF_L3MDEV_MASTER;
diff --git a/include/linux/of.h b/include/linux/of.h
index fe472e5195a9..e240992e5cb6 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -50,7 +50,6 @@ struct of_irq_controller;
50 50
51struct device_node { 51struct device_node {
52 const char *name; 52 const char *name;
53 const char *type;
54 phandle phandle; 53 phandle phandle;
55 const char *full_name; 54 const char *full_name;
56 struct fwnode_handle fwnode; 55 struct fwnode_handle fwnode;
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index cb1adf0b78a9..249d4d7fbf18 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -24,7 +24,7 @@ static inline void *
24pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, 24pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
25 dma_addr_t *dma_handle) 25 dma_addr_t *dma_handle)
26{ 26{
27 return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); 27 return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
28} 28}
29 29
30static inline void 30static inline void
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1d5c551a5add..e1a051724f7e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -447,6 +447,11 @@ struct pmu {
447 * Filter events for PMU-specific reasons. 447 * Filter events for PMU-specific reasons.
448 */ 448 */
449 int (*filter_match) (struct perf_event *event); /* optional */ 449 int (*filter_match) (struct perf_event *event); /* optional */
450
451 /*
452 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
453 */
454 int (*check_period) (struct perf_event *event, u64 value); /* optional */
450}; 455};
451 456
452enum perf_addr_filter_action_t { 457enum perf_addr_filter_action_t {
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3b051f761450..333b56d8f746 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
48extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; 48extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
49extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; 49extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
50extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; 50extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
51extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
51extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; 52extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
52 53
53#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) 54#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
@@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
56#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) 57#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
57#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) 58#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
58#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) 59#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
60#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
59#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) 61#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
60 62
61extern const int phy_10_100_features_array[4]; 63extern const int phy_10_100_features_array[4];
@@ -467,8 +469,8 @@ struct phy_device {
467 * only works for PHYs with IDs which match this field 469 * only works for PHYs with IDs which match this field
468 * name: The friendly name of this PHY type 470 * name: The friendly name of this PHY type
469 * phy_id_mask: Defines the important bits of the phy_id 471 * phy_id_mask: Defines the important bits of the phy_id
470 * features: A list of features (speed, duplex, etc) supported 472 * features: A mandatory list of features (speed, duplex, etc)
471 * by this PHY 473 * supported by this PHY
472 * flags: A bitfield defining certain other features this PHY 474 * flags: A bitfield defining certain other features this PHY
473 * supports (like interrupts) 475 * supports (like interrupts)
474 * 476 *
@@ -672,26 +674,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
672size_t phy_speeds(unsigned int *speeds, size_t size, 674size_t phy_speeds(unsigned int *speeds, size_t size,
673 unsigned long *mask); 675 unsigned long *mask);
674 676
675static inline bool __phy_is_started(struct phy_device *phydev)
676{
677 WARN_ON(!mutex_is_locked(&phydev->lock));
678
679 return phydev->state >= PHY_UP;
680}
681
682/** 677/**
683 * phy_is_started - Convenience function to check whether PHY is started 678 * phy_is_started - Convenience function to check whether PHY is started
684 * @phydev: The phy_device struct 679 * @phydev: The phy_device struct
685 */ 680 */
686static inline bool phy_is_started(struct phy_device *phydev) 681static inline bool phy_is_started(struct phy_device *phydev)
687{ 682{
688 bool started; 683 return phydev->state >= PHY_UP;
689
690 mutex_lock(&phydev->lock);
691 started = __phy_is_started(phydev);
692 mutex_unlock(&phydev->lock);
693
694 return started;
695} 684}
696 685
697void phy_resolve_aneg_linkmode(struct phy_device *phydev); 686void phy_resolve_aneg_linkmode(struct phy_device *phydev);
@@ -1003,6 +992,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
1003{ 992{
1004 return 0; 993 return 0;
1005} 994}
995static inline int genphy_no_ack_interrupt(struct phy_device *phydev)
996{
997 return 0;
998}
999static inline int genphy_no_config_intr(struct phy_device *phydev)
1000{
1001 return 0;
1002}
1006int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, 1003int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
1007 u16 regnum); 1004 u16 regnum);
1008int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, 1005int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index e8e118d70fd7..3f350e2749fe 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -42,6 +42,7 @@ enum phy_mode {
42 PHY_MODE_PCIE, 42 PHY_MODE_PCIE,
43 PHY_MODE_ETHERNET, 43 PHY_MODE_ETHERNET,
44 PHY_MODE_MIPI_DPHY, 44 PHY_MODE_MIPI_DPHY,
45 PHY_MODE_SATA
45}; 46};
46 47
47/** 48/**
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0a2a88e5a383..b895f4e79868 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -108,6 +108,7 @@ void dev_pm_opp_put(struct dev_pm_opp *opp);
108int dev_pm_opp_add(struct device *dev, unsigned long freq, 108int dev_pm_opp_add(struct device *dev, unsigned long freq,
109 unsigned long u_volt); 109 unsigned long u_volt);
110void dev_pm_opp_remove(struct device *dev, unsigned long freq); 110void dev_pm_opp_remove(struct device *dev, unsigned long freq);
111void dev_pm_opp_remove_all_dynamic(struct device *dev);
111 112
112int dev_pm_opp_enable(struct device *dev, unsigned long freq); 113int dev_pm_opp_enable(struct device *dev, unsigned long freq);
113 114
@@ -217,6 +218,10 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
217{ 218{
218} 219}
219 220
221static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
222{
223}
224
220static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) 225static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
221{ 226{
222 return 0; 227 return 0;
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 54af4eef169f..fed5be706bc9 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
105 105
106static inline void pm_runtime_mark_last_busy(struct device *dev) 106static inline void pm_runtime_mark_last_busy(struct device *dev)
107{ 107{
108 WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get())); 108 WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
109} 109}
110 110
111static inline bool pm_runtime_is_irq_safe(struct device *dev) 111static inline bool pm_runtime_is_irq_safe(struct device *dev)
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 1637385bcc17..d0aecc04c54b 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -13,6 +13,7 @@
13#ifndef __QCOM_SCM_H 13#ifndef __QCOM_SCM_H
14#define __QCOM_SCM_H 14#define __QCOM_SCM_H
15 15
16#include <linux/err.h>
16#include <linux/types.h> 17#include <linux/types.h>
17#include <linux/cpumask.h> 18#include <linux/cpumask.h>
18 19
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 59ddf9af909e..2dd0a9ed5b36 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -663,6 +663,37 @@ out:
663static inline void qed_chain_set_prod(struct qed_chain *p_chain, 663static inline void qed_chain_set_prod(struct qed_chain *p_chain,
664 u32 prod_idx, void *p_prod_elem) 664 u32 prod_idx, void *p_prod_elem)
665{ 665{
666 if (p_chain->mode == QED_CHAIN_MODE_PBL) {
667 u32 cur_prod, page_mask, page_cnt, page_diff;
668
669 cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
670 p_chain->u.chain32.prod_idx;
671
672 /* Assume that number of elements in a page is power of 2 */
673 page_mask = ~p_chain->elem_per_page_mask;
674
675 /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
676 * reaches the first element of next page before the page index
677 * is incremented. See qed_chain_produce().
678 * Index wrap around is not a problem because the difference
679 * between current and given producer indices is always
680 * positive and lower than the chain's capacity.
681 */
682 page_diff = (((cur_prod - 1) & page_mask) -
683 ((prod_idx - 1) & page_mask)) /
684 p_chain->elem_per_page;
685
686 page_cnt = qed_chain_get_page_cnt(p_chain);
687 if (is_chain_u16(p_chain))
688 p_chain->pbl.c.u16.prod_page_idx =
689 (p_chain->pbl.c.u16.prod_page_idx -
690 page_diff + page_cnt) % page_cnt;
691 else
692 p_chain->pbl.c.u32.prod_page_idx =
693 (p_chain->pbl.c.u32.prod_page_idx -
694 page_diff + page_cnt) % page_cnt;
695 }
696
666 if (is_chain_u16(p_chain)) 697 if (is_chain_u16(p_chain))
667 p_chain->u.chain16.prod_idx = (u16) prod_idx; 698 p_chain->u.chain16.prod_idx = (u16) prod_idx;
668 else 699 else
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 29af6d6b2f4b..c1901b61ca30 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -32,6 +32,8 @@ struct reset_control *devm_reset_control_array_get(struct device *dev,
32struct reset_control *of_reset_control_array_get(struct device_node *np, 32struct reset_control *of_reset_control_array_get(struct device_node *np,
33 bool shared, bool optional); 33 bool shared, bool optional);
34 34
35int reset_control_get_count(struct device *dev);
36
35#else 37#else
36 38
37static inline int reset_control_reset(struct reset_control *rstc) 39static inline int reset_control_reset(struct reset_control *rstc)
@@ -97,6 +99,11 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
97 return optional ? NULL : ERR_PTR(-ENOTSUPP); 99 return optional ? NULL : ERR_PTR(-ENOTSUPP);
98} 100}
99 101
102static inline int reset_control_get_count(struct device *dev)
103{
104 return -ENOENT;
105}
106
100#endif /* CONFIG_RESET_CONTROLLER */ 107#endif /* CONFIG_RESET_CONTROLLER */
101 108
102static inline int __must_check device_reset(struct device *dev) 109static inline int __must_check device_reset(struct device *dev)
@@ -138,7 +145,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
138 * 145 *
139 * Returns a struct reset_control or IS_ERR() condition containing errno. 146 * Returns a struct reset_control or IS_ERR() condition containing errno.
140 * This function is intended for use with reset-controls which are shared 147 * This function is intended for use with reset-controls which are shared
141 * between hardware-blocks. 148 * between hardware blocks.
142 * 149 *
143 * When a reset-control is shared, the behavior of reset_control_assert / 150 * When a reset-control is shared, the behavior of reset_control_assert /
144 * deassert is changed, the reset-core will keep track of a deassert_count 151 * deassert is changed, the reset-core will keep track of a deassert_count
@@ -187,7 +194,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
187} 194}
188 195
189/** 196/**
190 * of_reset_control_get_shared - Lookup and obtain an shared reference 197 * of_reset_control_get_shared - Lookup and obtain a shared reference
191 * to a reset controller. 198 * to a reset controller.
192 * @node: device to be reset by the controller 199 * @node: device to be reset by the controller
193 * @id: reset line name 200 * @id: reset line name
@@ -229,7 +236,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
229} 236}
230 237
231/** 238/**
232 * of_reset_control_get_shared_by_index - Lookup and obtain an shared 239 * of_reset_control_get_shared_by_index - Lookup and obtain a shared
233 * reference to a reset controller 240 * reference to a reset controller
234 * by index. 241 * by index.
235 * @node: device to be reset by the controller 242 * @node: device to be reset by the controller
@@ -322,7 +329,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
322 329
323/** 330/**
324 * devm_reset_control_get_shared_by_index - resource managed 331 * devm_reset_control_get_shared_by_index - resource managed
325 * reset_control_get_shared 332 * reset_control_get_shared
326 * @dev: device to be reset by the controller 333 * @dev: device to be reset by the controller
327 * @index: index of the reset controller 334 * @index: index of the reset controller
328 * 335 *
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89541d248893..bba3afb4e9bf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -995,7 +995,7 @@ struct task_struct {
995 /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 995 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
996 struct list_head cg_list; 996 struct list_head cg_list;
997#endif 997#endif
998#ifdef CONFIG_RESCTRL 998#ifdef CONFIG_X86_CPU_RESCTRL
999 u32 closid; 999 u32 closid;
1000 u32 rmid; 1000 u32 rmid;
1001#endif 1001#endif
@@ -1406,6 +1406,7 @@ extern struct pid *cad_pid;
1406#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1406#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
1407#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1407#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1408#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ 1408#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
1409#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */
1409#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ 1410#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
1410#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1411#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1411#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1412#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
@@ -1904,6 +1905,14 @@ static inline void rseq_execve(struct task_struct *t)
1904 1905
1905#endif 1906#endif
1906 1907
1908void __exit_umh(struct task_struct *tsk);
1909
1910static inline void exit_umh(struct task_struct *tsk)
1911{
1912 if (unlikely(tsk->flags & PF_UMH))
1913 __exit_umh(tsk);
1914}
1915
1907#ifdef CONFIG_DEBUG_RSEQ 1916#ifdef CONFIG_DEBUG_RSEQ
1908 1917
1909void rseq_syscall(struct pt_regs *regs); 1918void rseq_syscall(struct pt_regs *regs);
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ec912d01126f..ecdc6542070f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ 71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ 72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
73#define MMF_OOM_VICTIM 25 /* mm is the oom victim */ 73#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
74#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
74#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) 75#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
75 76
76#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ 77#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 10b19a192b2d..545f37138057 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -24,9 +24,13 @@
24 * called near the end of a function. Otherwise, the list can be 24 * called near the end of a function. Otherwise, the list can be
25 * re-initialized for later re-use by wake_q_init(). 25 * re-initialized for later re-use by wake_q_init().
26 * 26 *
27 * Note that this can cause spurious wakeups. schedule() callers 27 * NOTE that this can cause spurious wakeups. schedule() callers
28 * must ensure the call is done inside a loop, confirming that the 28 * must ensure the call is done inside a loop, confirming that the
29 * wakeup condition has in fact occurred. 29 * wakeup condition has in fact occurred.
30 *
31 * NOTE that there is no guarantee the wakeup will happen any later than the
32 * wake_q_add() location. Therefore task must be ready to be woken at the
33 * location of the wake_q_add().
30 */ 34 */
31 35
32#include <linux/sched.h> 36#include <linux/sched.h>
diff --git a/include/linux/signal.h b/include/linux/signal.h
index cc7e2c1cd444..9702016734b1 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -392,7 +392,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig);
392#endif 392#endif
393 393
394#define siginmask(sig, mask) \ 394#define siginmask(sig, mask) \
395 ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) 395 ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
396 396
397#define SIG_KERNEL_ONLY_MASK (\ 397#define SIG_KERNEL_ONLY_MASK (\
398 rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) 398 rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 93f56fddd92a..bdb9563c64a0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2434,7 +2434,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
2434 2434
2435 if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) 2435 if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
2436 skb_set_transport_header(skb, keys.control.thoff); 2436 skb_set_transport_header(skb, keys.control.thoff);
2437 else 2437 else if (offset_hint >= 0)
2438 skb_set_transport_header(skb, offset_hint); 2438 skb_set_transport_header(skb, offset_hint);
2439} 2439}
2440 2440
@@ -3218,6 +3218,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3218 * 3218 *
3219 * This is exactly the same as pskb_trim except that it ensures the 3219 * This is exactly the same as pskb_trim except that it ensures the
3220 * checksum of received packets are still valid after the operation. 3220 * checksum of received packets are still valid after the operation.
3221 * It can change skb pointers.
3221 */ 3222 */
3222 3223
3223static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) 3224static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
@@ -4211,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4211 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; 4212 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4212} 4213}
4213 4214
4215static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4216{
4217 return skb_is_gso(skb) &&
4218 skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4219}
4220
4214static inline void skb_gso_reset(struct sk_buff *skb) 4221static inline void skb_gso_reset(struct sk_buff *skb)
4215{ 4222{
4216 skb_shinfo(skb)->gso_size = 0; 4223 skb_shinfo(skb)->gso_size = 0;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7ddfc65586b0..4335bd771ce5 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
184 struct clk *pclk; 184 struct clk *pclk;
185 struct clk *clk_ptp_ref; 185 struct clk *clk_ptp_ref;
186 unsigned int clk_ptp_rate; 186 unsigned int clk_ptp_rate;
187 unsigned int clk_ref_rate;
187 struct reset_control *stmmac_rst; 188 struct reset_control *stmmac_rst;
188 struct stmmac_axi *axi; 189 struct stmmac_axi *axi;
189 int has_gmac4; 190 int has_gmac4;
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 235f51b62c71..0c08de356d0d 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -47,6 +47,8 @@ struct umh_info {
47 const char *cmdline; 47 const char *cmdline;
48 struct file *pipe_to_umh; 48 struct file *pipe_to_umh;
49 struct file *pipe_from_umh; 49 struct file *pipe_from_umh;
50 struct list_head list;
51 void (*cleanup)(struct umh_info *info);
50 pid_t pid; 52 pid_t pid;
51}; 53};
52int fork_usermode_blob(void *data, size_t len, struct umh_info *info); 54int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 32baf8e26735..987b6491b946 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -12,6 +12,11 @@ struct irq_affinity;
12 12
13/** 13/**
14 * virtio_config_ops - operations for configuring a virtio device 14 * virtio_config_ops - operations for configuring a virtio device
15 * Note: Do not assume that a transport implements all of the operations
16 * getting/setting a value as a simple read/write! Generally speaking,
17 * any of @get/@set, @get_status/@set_status, or @get_features/
18 * @finalize_features are NOT safe to be called from an atomic
19 * context.
15 * @get: read the value of a configuration field 20 * @get: read the value of a configuration field
16 * vdev: the virtio_device 21 * vdev: the virtio_device
17 * offset: the offset of the configuration field 22 * offset: the offset of the configuration field
@@ -22,7 +27,7 @@ struct irq_affinity;
22 * offset: the offset of the configuration field 27 * offset: the offset of the configuration field
23 * buf: the buffer to read the field value from. 28 * buf: the buffer to read the field value from.
24 * len: the length of the buffer 29 * len: the length of the buffer
25 * @generation: config generation counter 30 * @generation: config generation counter (optional)
26 * vdev: the virtio_device 31 * vdev: the virtio_device
27 * Returns the config generation counter 32 * Returns the config generation counter
28 * @get_status: read the status byte 33 * @get_status: read the status byte
@@ -48,17 +53,17 @@ struct irq_affinity;
48 * @del_vqs: free virtqueues found by find_vqs(). 53 * @del_vqs: free virtqueues found by find_vqs().
49 * @get_features: get the array of feature bits for this device. 54 * @get_features: get the array of feature bits for this device.
50 * vdev: the virtio_device 55 * vdev: the virtio_device
51 * Returns the first 32 feature bits (all we currently need). 56 * Returns the first 64 feature bits (all we currently need).
52 * @finalize_features: confirm what device features we'll be using. 57 * @finalize_features: confirm what device features we'll be using.
53 * vdev: the virtio_device 58 * vdev: the virtio_device
54 * This gives the final feature bits for the device: it can change 59 * This gives the final feature bits for the device: it can change
55 * the dev->feature bits if it wants. 60 * the dev->feature bits if it wants.
56 * Returns 0 on success or error status 61 * Returns 0 on success or error status
57 * @bus_name: return the bus name associated with the device 62 * @bus_name: return the bus name associated with the device (optional)
58 * vdev: the virtio_device 63 * vdev: the virtio_device
59 * This returns a pointer to the bus name a la pci_name from which 64 * This returns a pointer to the bus name a la pci_name from which
60 * the caller can then copy. 65 * the caller can then copy.
61 * @set_vq_affinity: set the affinity for a virtqueue. 66 * @set_vq_affinity: set the affinity for a virtqueue (optional).
62 * @get_vq_affinity: get the affinity for a virtqueue (optional). 67 * @get_vq_affinity: get the affinity for a virtqueue (optional).
63 */ 68 */
64typedef void vq_callback_t(struct virtqueue *); 69typedef void vq_callback_t(struct virtqueue *);
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index cb462f9ab7dd..e0348cb0a1dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
57 57
58 if (!skb_partial_csum_set(skb, start, off)) 58 if (!skb_partial_csum_set(skb, start, off))
59 return -EINVAL; 59 return -EINVAL;
60 } else {
61 /* gso packets without NEEDS_CSUM do not set transport_offset.
62 * probe and drop if does not match one of the above types.
63 */
64 if (gso_type && skb->network_header) {
65 if (!skb->protocol)
66 virtio_net_hdr_set_proto(skb, hdr);
67retry:
68 skb_probe_transport_header(skb, -1);
69 if (!skb_transport_header_was_set(skb)) {
70 /* UFO does not specify ipv4 or 6: try both */
71 if (gso_type & SKB_GSO_UDP &&
72 skb->protocol == htons(ETH_P_IP)) {
73 skb->protocol = htons(ETH_P_IPV6);
74 goto retry;
75 }
76 return -EINVAL;
77 }
78 }
60 } 79 }
61 80
62 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 81 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index f492e21c4aa2..5d9d318bcf7a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry)
176 */ 176 */
177static inline bool xa_is_err(const void *entry) 177static inline bool xa_is_err(const void *entry)
178{ 178{
179 return unlikely(xa_is_internal(entry)); 179 return unlikely(xa_is_internal(entry) &&
180 entry >= xa_mk_internal(-MAX_ERRNO));
180} 181}
181 182
182/** 183/**
@@ -286,7 +287,6 @@ struct xarray {
286 */ 287 */
287#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) 288#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
288 289
289void xa_init_flags(struct xarray *, gfp_t flags);
290void *xa_load(struct xarray *, unsigned long index); 290void *xa_load(struct xarray *, unsigned long index);
291void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); 291void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
292void *xa_erase(struct xarray *, unsigned long index); 292void *xa_erase(struct xarray *, unsigned long index);
@@ -304,6 +304,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
304void xa_destroy(struct xarray *); 304void xa_destroy(struct xarray *);
305 305
306/** 306/**
307 * xa_init_flags() - Initialise an empty XArray with flags.
308 * @xa: XArray.
309 * @flags: XA_FLAG values.
310 *
311 * If you need to initialise an XArray with special flags (eg you need
312 * to take the lock from interrupt context), use this function instead
313 * of xa_init().
314 *
315 * Context: Any context.
316 */
317static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
318{
319 spin_lock_init(&xa->xa_lock);
320 xa->xa_flags = flags;
321 xa->xa_head = NULL;
322}
323
324/**
307 * xa_init() - Initialise an empty XArray. 325 * xa_init() - Initialise an empty XArray.
308 * @xa: XArray. 326 * @xa: XArray.
309 * 327 *
@@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
342} 360}
343 361
344/** 362/**
345 * xa_for_each() - Iterate over a portion of an XArray. 363 * xa_for_each_start() - Iterate over a portion of an XArray.
346 * @xa: XArray. 364 * @xa: XArray.
365 * @index: Index of @entry.
347 * @entry: Entry retrieved from array. 366 * @entry: Entry retrieved from array.
367 * @start: First index to retrieve from array.
368 *
369 * During the iteration, @entry will have the value of the entry stored
370 * in @xa at @index. You may modify @index during the iteration if you
371 * want to skip or reprocess indices. It is safe to modify the array
372 * during the iteration. At the end of the iteration, @entry will be set
373 * to NULL and @index will have a value less than or equal to max.
374 *
375 * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
376 * to handle your own locking with xas_for_each(), and if you have to unlock
377 * after each iteration, it will also end up being O(n.log(n)).
378 * xa_for_each_start() will spin if it hits a retry entry; if you intend to
379 * see retry entries, you should use the xas_for_each() iterator instead.
380 * The xas_for_each() iterator will expand into more inline code than
381 * xa_for_each_start().
382 *
383 * Context: Any context. Takes and releases the RCU lock.
384 */
385#define xa_for_each_start(xa, index, entry, start) \
386 for (index = start, \
387 entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \
388 entry; \
389 entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
390
391/**
392 * xa_for_each() - Iterate over present entries in an XArray.
393 * @xa: XArray.
348 * @index: Index of @entry. 394 * @index: Index of @entry.
349 * @max: Maximum index to retrieve from array. 395 * @entry: Entry retrieved from array.
350 * @filter: Selection criterion.
351 * 396 *
352 * Initialise @index to the lowest index you want to retrieve from the 397 * During the iteration, @entry will have the value of the entry stored
353 * array. During the iteration, @entry will have the value of the entry 398 * in @xa at @index. You may modify @index during the iteration if you want
354 * stored in @xa at @index. The iteration will skip all entries in the 399 * to skip or reprocess indices. It is safe to modify the array during the
355 * array which do not match @filter. You may modify @index during the 400 * iteration. At the end of the iteration, @entry will be set to NULL and
356 * iteration if you want to skip or reprocess indices. It is safe to modify 401 * @index will have a value less than or equal to max.
357 * the array during the iteration. At the end of the iteration, @entry will
358 * be set to NULL and @index will have a value less than or equal to max.
359 * 402 *
360 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have 403 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
361 * to handle your own locking with xas_for_each(), and if you have to unlock 404 * to handle your own locking with xas_for_each(), and if you have to unlock
@@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
366 * 409 *
367 * Context: Any context. Takes and releases the RCU lock. 410 * Context: Any context. Takes and releases the RCU lock.
368 */ 411 */
369#define xa_for_each(xa, entry, index, max, filter) \ 412#define xa_for_each(xa, index, entry) \
370 for (entry = xa_find(xa, &index, max, filter); entry; \ 413 xa_for_each_start(xa, index, entry, 0)
371 entry = xa_find_after(xa, &index, max, filter)) 414
415/**
416 * xa_for_each_marked() - Iterate over marked entries in an XArray.
417 * @xa: XArray.
418 * @index: Index of @entry.
419 * @entry: Entry retrieved from array.
420 * @filter: Selection criterion.
421 *
422 * During the iteration, @entry will have the value of the entry stored
423 * in @xa at @index. The iteration will skip all entries in the array
424 * which do not match @filter. You may modify @index during the iteration
425 * if you want to skip or reprocess indices. It is safe to modify the array
426 * during the iteration. At the end of the iteration, @entry will be set to
427 * NULL and @index will have a value less than or equal to max.
428 *
429 * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
430 * You have to handle your own locking with xas_for_each(), and if you have
431 * to unlock after each iteration, it will also end up being O(n.log(n)).
432 * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
433 * see retry entries, you should use the xas_for_each_marked() iterator
434 * instead. The xas_for_each_marked() iterator will expand into more inline
435 * code than xa_for_each_marked().
436 *
437 * Context: Any context. Takes and releases the RCU lock.
438 */
439#define xa_for_each_marked(xa, index, entry, filter) \
440 for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
441 entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
372 442
373#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) 443#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
374#define xa_lock(xa) spin_lock(&(xa)->xa_lock) 444#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
@@ -393,40 +463,13 @@ void *__xa_erase(struct xarray *, unsigned long index);
393void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); 463void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
394void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, 464void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
395 void *entry, gfp_t); 465 void *entry, gfp_t);
466int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
396int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); 467int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
397int __xa_reserve(struct xarray *, unsigned long index, gfp_t); 468int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
398void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); 469void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
399void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); 470void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
400 471
401/** 472/**
402 * __xa_insert() - Store this entry in the XArray unless another entry is
403 * already present.
404 * @xa: XArray.
405 * @index: Index into array.
406 * @entry: New entry.
407 * @gfp: Memory allocation flags.
408 *
409 * If you would rather see the existing entry in the array, use __xa_cmpxchg().
410 * This function is for users who don't care what the entry is, only that
411 * one is present.
412 *
413 * Context: Any context. Expects xa_lock to be held on entry. May
414 * release and reacquire xa_lock if the @gfp flags permit.
415 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
416 * -ENOMEM if memory could not be allocated.
417 */
418static inline int __xa_insert(struct xarray *xa, unsigned long index,
419 void *entry, gfp_t gfp)
420{
421 void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
422 if (!curr)
423 return 0;
424 if (xa_is_err(curr))
425 return xa_err(curr);
426 return -EEXIST;
427}
428
429/**
430 * xa_store_bh() - Store this entry in the XArray. 473 * xa_store_bh() - Store this entry in the XArray.
431 * @xa: XArray. 474 * @xa: XArray.
432 * @index: Index into array. 475 * @index: Index into array.
@@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
453} 496}
454 497
455/** 498/**
456 * xa_store_irq() - Erase this entry from the XArray. 499 * xa_store_irq() - Store this entry in the XArray.
457 * @xa: XArray. 500 * @xa: XArray.
458 * @index: Index into array. 501 * @index: Index into array.
459 * @entry: New entry. 502 * @entry: New entry.
@@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
615 * @entry: New entry. 658 * @entry: New entry.
616 * @gfp: Memory allocation flags. 659 * @gfp: Memory allocation flags.
617 * 660 *
618 * If you would rather see the existing entry in the array, use xa_cmpxchg(). 661 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
619 * This function is for users who don't care what the entry is, only that 662 * if no entry is present. Inserting will fail if a reserved entry is
620 * one is present. 663 * present, even though loading from this index will return NULL.
621 * 664 *
622 * Context: Process context. Takes and releases the xa_lock. 665 * Context: Any context. Takes and releases the xa_lock. May sleep if
623 * May sleep if the @gfp flags permit. 666 * the @gfp flags permit.
624 * Return: 0 if the store succeeded. -EEXIST if another entry was present. 667 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
625 * -ENOMEM if memory could not be allocated. 668 * -ENOMEM if memory could not be allocated.
626 */ 669 */
627static inline int xa_insert(struct xarray *xa, unsigned long index, 670static inline int xa_insert(struct xarray *xa, unsigned long index,
628 void *entry, gfp_t gfp) 671 void *entry, gfp_t gfp)
629{ 672{
630 void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); 673 int err;
631 if (!curr) 674
632 return 0; 675 xa_lock(xa);
633 if (xa_is_err(curr)) 676 err = __xa_insert(xa, index, entry, gfp);
634 return xa_err(curr); 677 xa_unlock(xa);
635 return -EEXIST; 678
679 return err;
680}
681
682/**
683 * xa_insert_bh() - Store this entry in the XArray unless another entry is
684 * already present.
685 * @xa: XArray.
686 * @index: Index into array.
687 * @entry: New entry.
688 * @gfp: Memory allocation flags.
689 *
690 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
691 * if no entry is present. Inserting will fail if a reserved entry is
692 * present, even though loading from this index will return NULL.
693 *
694 * Context: Any context. Takes and releases the xa_lock while
695 * disabling softirqs. May sleep if the @gfp flags permit.
696 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
697 * -ENOMEM if memory could not be allocated.
698 */
699static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
700 void *entry, gfp_t gfp)
701{
702 int err;
703
704 xa_lock_bh(xa);
705 err = __xa_insert(xa, index, entry, gfp);
706 xa_unlock_bh(xa);
707
708 return err;
709}
710
711/**
712 * xa_insert_irq() - Store this entry in the XArray unless another entry is
713 * already present.
714 * @xa: XArray.
715 * @index: Index into array.
716 * @entry: New entry.
717 * @gfp: Memory allocation flags.
718 *
719 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
720 * if no entry is present. Inserting will fail if a reserved entry is
721 * present, even though loading from this index will return NULL.
722 *
723 * Context: Process context. Takes and releases the xa_lock while
724 * disabling interrupts. May sleep if the @gfp flags permit.
725 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
726 * -ENOMEM if memory could not be allocated.
727 */
728static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
729 void *entry, gfp_t gfp)
730{
731 int err;
732
733 xa_lock_irq(xa);
734 err = __xa_insert(xa, index, entry, gfp);
735 xa_unlock_irq(xa);
736
737 return err;
636} 738}
637 739
638/** 740/**
@@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry)
970 (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); 1072 (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
971} 1073}
972 1074
973#define XA_ZERO_ENTRY xa_mk_internal(256) 1075#define XA_RETRY_ENTRY xa_mk_internal(256)
974#define XA_RETRY_ENTRY xa_mk_internal(257) 1076#define XA_ZERO_ENTRY xa_mk_internal(257)
975 1077
976/** 1078/**
977 * xa_is_zero() - Is the entry a zero entry? 1079 * xa_is_zero() - Is the entry a zero entry?
@@ -996,6 +1098,17 @@ static inline bool xa_is_retry(const void *entry)
996} 1098}
997 1099
998/** 1100/**
1101 * xa_is_advanced() - Is the entry only permitted for the advanced API?
1102 * @entry: Entry to be stored in the XArray.
1103 *
1104 * Return: %true if the entry cannot be stored by the normal API.
1105 */
1106static inline bool xa_is_advanced(const void *entry)
1107{
1108 return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
1109}
1110
1111/**
999 * typedef xa_update_node_t - A callback function from the XArray. 1112 * typedef xa_update_node_t - A callback function from the XArray.
1000 * @node: The node which is being processed 1113 * @node: The node which is being processed
1001 * 1114 *
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 1adefe42c0a6..2bfb87eb98ce 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -21,18 +21,6 @@ struct socket;
21struct rxrpc_call; 21struct rxrpc_call;
22 22
23/* 23/*
24 * Call completion condition (state == RXRPC_CALL_COMPLETE).
25 */
26enum rxrpc_call_completion {
27 RXRPC_CALL_SUCCEEDED, /* - Normal termination */
28 RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
29 RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
30 RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
31 RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
32 NR__RXRPC_CALL_COMPLETIONS
33};
34
35/*
36 * Debug ID counter for tracing. 24 * Debug ID counter for tracing.
37 */ 25 */
38extern atomic_t rxrpc_debug_id; 26extern atomic_t rxrpc_debug_id;
@@ -73,10 +61,6 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
73 rxrpc_user_attach_call_t, unsigned long, gfp_t, 61 rxrpc_user_attach_call_t, unsigned long, gfp_t,
74 unsigned int); 62 unsigned int);
75void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); 63void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
76int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
77 struct sockaddr_rxrpc *, struct key *);
78int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
79 enum rxrpc_call_completion *, u32 *);
80u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); 64u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
81void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); 65void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
82u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); 66u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 3f9aea8087e3..8b7eb46ad72d 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
201 201
202void __ax25_put_route(ax25_route *ax25_rt); 202void __ax25_put_route(ax25_route *ax25_rt);
203 203
204extern rwlock_t ax25_route_lock;
205
206static inline void ax25_route_lock_use(void)
207{
208 read_lock(&ax25_route_lock);
209}
210
211static inline void ax25_route_lock_unuse(void)
212{
213 read_unlock(&ax25_route_lock);
214}
215
204static inline void ax25_put_route(ax25_route *ax25_rt) 216static inline void ax25_put_route(ax25_route *ax25_rt)
205{ 217{
206 if (refcount_dec_and_test(&ax25_rt->refcount)) 218 if (refcount_dec_and_test(&ax25_rt->refcount))
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 00b5e7825508..74ff688568a0 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -39,6 +39,7 @@ struct inet_peer {
39 39
40 u32 metrics[RTAX_MAX]; 40 u32 metrics[RTAX_MAX];
41 u32 rate_tokens; /* rate limiting for ICMP */ 41 u32 rate_tokens; /* rate limiting for ICMP */
42 u32 n_redirects;
42 unsigned long rate_last; 43 unsigned long rate_last;
43 /* 44 /*
44 * Once inet_peer is queued for deletion (refcnt == 0), following field 45 * Once inet_peer is queued for deletion (refcnt == 0), following field
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index c5969762a8f4..9c8214d2116d 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
241 struct netlink_ext_ack *extack); 241 struct netlink_ext_ack *extack);
242int fib_table_dump(struct fib_table *table, struct sk_buff *skb, 242int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
243 struct netlink_callback *cb, struct fib_dump_filter *filter); 243 struct netlink_callback *cb, struct fib_dump_filter *filter);
244int fib_table_flush(struct net *net, struct fib_table *table); 244int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
245struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); 245struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
246void fib_table_flush_external(struct fib_table *table); 246void fib_table_flush_external(struct fib_table *table);
247void fib_free_table(struct fib_table *tb); 247void fib_free_table(struct fib_table *tb);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 78fa0ac4613c..5175fd63cd82 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -153,7 +153,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
153 153
154 if (netif_is_l3_slave(skb->dev)) 154 if (netif_is_l3_slave(skb->dev))
155 master = netdev_master_upper_dev_get_rcu(skb->dev); 155 master = netdev_master_upper_dev_get_rcu(skb->dev);
156 else if (netif_is_l3_master(skb->dev)) 156 else if (netif_is_l3_master(skb->dev) ||
157 netif_has_l3_rx_handler(skb->dev))
157 master = skb->dev; 158 master = skb->dev;
158 159
159 if (master && master->l3mdev_ops->l3mdev_l3_rcv) 160 if (master && master->l3mdev_ops->l3mdev_l3_rcv)
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 7d5cda7ce32a..3e370cb36263 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -84,7 +84,6 @@ struct flow_offload {
84struct nf_flow_route { 84struct nf_flow_route {
85 struct { 85 struct {
86 struct dst_entry *dst; 86 struct dst_entry *dst;
87 int ifindex;
88 } tuple[FLOW_OFFLOAD_DIR_MAX]; 87 } tuple[FLOW_OFFLOAD_DIR_MAX];
89}; 88};
90 89
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 841835a387e1..b4984bbbe157 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -469,9 +469,7 @@ struct nft_set_binding {
469int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, 469int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
470 struct nft_set_binding *binding); 470 struct nft_set_binding *binding);
471void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 471void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
472 struct nft_set_binding *binding); 472 struct nft_set_binding *binding, bool commit);
473void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
474 struct nft_set_binding *binding);
475void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); 473void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
476 474
477/** 475/**
@@ -721,6 +719,13 @@ struct nft_expr_type {
721#define NFT_EXPR_STATEFUL 0x1 719#define NFT_EXPR_STATEFUL 0x1
722#define NFT_EXPR_GC 0x2 720#define NFT_EXPR_GC 0x2
723 721
722enum nft_trans_phase {
723 NFT_TRANS_PREPARE,
724 NFT_TRANS_ABORT,
725 NFT_TRANS_COMMIT,
726 NFT_TRANS_RELEASE
727};
728
724/** 729/**
725 * struct nft_expr_ops - nf_tables expression operations 730 * struct nft_expr_ops - nf_tables expression operations
726 * 731 *
@@ -750,7 +755,8 @@ struct nft_expr_ops {
750 void (*activate)(const struct nft_ctx *ctx, 755 void (*activate)(const struct nft_ctx *ctx,
751 const struct nft_expr *expr); 756 const struct nft_expr *expr);
752 void (*deactivate)(const struct nft_ctx *ctx, 757 void (*deactivate)(const struct nft_ctx *ctx,
753 const struct nft_expr *expr); 758 const struct nft_expr *expr,
759 enum nft_trans_phase phase);
754 void (*destroy)(const struct nft_ctx *ctx, 760 void (*destroy)(const struct nft_ctx *ctx,
755 const struct nft_expr *expr); 761 const struct nft_expr *expr);
756 void (*destroy_clone)(const struct nft_ctx *ctx, 762 void (*destroy_clone)(const struct nft_ctx *ctx,
@@ -1323,12 +1329,15 @@ struct nft_trans_rule {
1323struct nft_trans_set { 1329struct nft_trans_set {
1324 struct nft_set *set; 1330 struct nft_set *set;
1325 u32 set_id; 1331 u32 set_id;
1332 bool bound;
1326}; 1333};
1327 1334
1328#define nft_trans_set(trans) \ 1335#define nft_trans_set(trans) \
1329 (((struct nft_trans_set *)trans->data)->set) 1336 (((struct nft_trans_set *)trans->data)->set)
1330#define nft_trans_set_id(trans) \ 1337#define nft_trans_set_id(trans) \
1331 (((struct nft_trans_set *)trans->data)->set_id) 1338 (((struct nft_trans_set *)trans->data)->set_id)
1339#define nft_trans_set_bound(trans) \
1340 (((struct nft_trans_set *)trans->data)->bound)
1332 1341
1333struct nft_trans_chain { 1342struct nft_trans_chain {
1334 bool update; 1343 bool update;
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index b669fe6dbc3b..98f31c7ea23d 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -63,10 +63,11 @@ struct pnpipehdr {
63 u8 state_after_reset; /* reset request */ 63 u8 state_after_reset; /* reset request */
64 u8 error_code; /* any response */ 64 u8 error_code; /* any response */
65 u8 pep_type; /* status indication */ 65 u8 pep_type; /* status indication */
66 u8 data[1]; 66 u8 data0; /* anything else */
67 }; 67 };
68 u8 data[];
68}; 69};
69#define other_pep_type data[1] 70#define other_pep_type data[0]
70 71
71static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) 72static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
72{ 73{
diff --git a/include/net/sock.h b/include/net/sock.h
index 2b229f7be8eb..f43f935cb113 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1277,7 +1277,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
1277 percpu_counter_inc(sk->sk_prot->sockets_allocated); 1277 percpu_counter_inc(sk->sk_prot->sockets_allocated);
1278} 1278}
1279 1279
1280static inline int 1280static inline u64
1281sk_sockets_allocated_read_positive(struct sock *sk) 1281sk_sockets_allocated_read_positive(struct sock *sk)
1282{ 1282{
1283 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); 1283 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
diff --git a/include/net/tls.h b/include/net/tls.h
index 2a6ac8d642af..1486b60c4de8 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -120,6 +120,8 @@ struct tls_rec {
120 struct scatterlist sg_aead_out[2]; 120 struct scatterlist sg_aead_out[2];
121 121
122 char aad_space[TLS_AAD_SPACE_SIZE]; 122 char aad_space[TLS_AAD_SPACE_SIZE];
123 u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE +
124 TLS_CIPHER_AES_GCM_128_SALT_SIZE];
123 struct aead_request aead_req; 125 struct aead_request aead_req;
124 u8 aead_req_ctx[]; 126 u8 aead_req_ctx[];
125}; 127};
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 7298a53b9702..85386becbaea 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -853,7 +853,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
853 xfrm_pol_put(pols[i]); 853 xfrm_pol_put(pols[i]);
854} 854}
855 855
856void __xfrm_state_destroy(struct xfrm_state *); 856void __xfrm_state_destroy(struct xfrm_state *, bool);
857 857
858static inline void __xfrm_state_put(struct xfrm_state *x) 858static inline void __xfrm_state_put(struct xfrm_state *x)
859{ 859{
@@ -863,7 +863,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
863static inline void xfrm_state_put(struct xfrm_state *x) 863static inline void xfrm_state_put(struct xfrm_state *x)
864{ 864{
865 if (refcount_dec_and_test(&x->refcnt)) 865 if (refcount_dec_and_test(&x->refcnt))
866 __xfrm_state_destroy(x); 866 __xfrm_state_destroy(x, false);
867}
868
869static inline void xfrm_state_put_sync(struct xfrm_state *x)
870{
871 if (refcount_dec_and_test(&x->refcnt))
872 __xfrm_state_destroy(x, true);
867} 873}
868 874
869static inline void xfrm_state_hold(struct xfrm_state *x) 875static inline void xfrm_state_hold(struct xfrm_state *x)
@@ -1590,7 +1596,7 @@ struct xfrmk_spdinfo {
1590 1596
1591struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); 1597struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1592int xfrm_state_delete(struct xfrm_state *x); 1598int xfrm_state_delete(struct xfrm_state *x);
1593int xfrm_state_flush(struct net *net, u8 proto, bool task_valid); 1599int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1594int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); 1600int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1595void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); 1601void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1596void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); 1602void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a3ceed3a040a..80debf5982ac 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2579,9 +2579,10 @@ struct ib_device {
2579 2579
2580 const struct uapi_definition *driver_def; 2580 const struct uapi_definition *driver_def;
2581 enum rdma_driver_id driver_id; 2581 enum rdma_driver_id driver_id;
2582
2582 /* 2583 /*
2583 * Provides synchronization between device unregistration and netlink 2584 * Positive refcount indicates that the device is currently
2584 * commands on a device. To be used only by core. 2585 * registered and cannot be unregistered.
2585 */ 2586 */
2586 refcount_t refcount; 2587 refcount_t refcount;
2587 struct completion unreg_completion; 2588 struct completion unreg_completion;
@@ -3926,6 +3927,25 @@ static inline bool ib_access_writable(int access_flags)
3926int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3927int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3927 struct ib_mr_status *mr_status); 3928 struct ib_mr_status *mr_status);
3928 3929
3930/**
3931 * ib_device_try_get: Hold a registration lock
3932 * device: The device to lock
3933 *
3934 * A device under an active registration lock cannot become unregistered. It
3935 * is only possible to obtain a registration lock on a device that is fully
3936 * registered, otherwise this function returns false.
3937 *
3938 * The registration lock is only necessary for actions which require the
3939 * device to still be registered. Uses that only require the device pointer to
3940 * be valid should use get_device(&ibdev->dev) to hold the memory.
3941 *
3942 */
3943static inline bool ib_device_try_get(struct ib_device *dev)
3944{
3945 return refcount_inc_not_zero(&dev->refcount);
3946}
3947
3948void ib_device_put(struct ib_device *device);
3929struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3949struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3930 u16 pkey, const union ib_gid *gid, 3950 u16 pkey, const union ib_gid *gid,
3931 const struct sockaddr *addr); 3951 const struct sockaddr *addr);
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 0cdc3999ecfa..c5188ff724d1 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -173,7 +173,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
173 if (snd_BUG_ON(!stream)) 173 if (snd_BUG_ON(!stream))
174 return; 174 return;
175 175
176 stream->runtime->state = SNDRV_PCM_STATE_SETUP; 176 if (stream->direction == SND_COMPRESS_PLAYBACK)
177 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
178 else
179 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
180
177 wake_up(&stream->runtime->sleep); 181 wake_up(&stream->runtime->sleep);
178} 182}
179 183
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 7fa48b100936..cc7c8d42d4fd 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -68,6 +68,7 @@ struct hda_bus {
68 unsigned int response_reset:1; /* controller was reset */ 68 unsigned int response_reset:1; /* controller was reset */
69 unsigned int in_reset:1; /* during reset operation */ 69 unsigned int in_reset:1; /* during reset operation */
70 unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ 70 unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
71 unsigned int bus_probing :1; /* during probing process */
71 72
72 int primary_dig_out_type; /* primary digital out PCM type */ 73 int primary_dig_out_type; /* primary digital out PCM type */
73 unsigned int mixer_assigned; /* codec addr for mixer name */ 74 unsigned int mixer_assigned; /* codec addr for mixer name */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 8ec1de856ee7..e665f111b0d2 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -985,6 +985,12 @@ struct snd_soc_dai_link {
985 /* Do not create a PCM for this DAI link (Backend link) */ 985 /* Do not create a PCM for this DAI link (Backend link) */
986 unsigned int ignore:1; 986 unsigned int ignore:1;
987 987
988 /*
989 * This driver uses legacy platform naming. Set by the core, machine
990 * drivers should not modify this value.
991 */
992 unsigned int legacy_platform:1;
993
988 struct list_head list; /* DAI link list of the soc card */ 994 struct list_head list; /* DAI link list of the soc card */
989 struct snd_soc_dobj dobj; /* For topology */ 995 struct snd_soc_dobj dobj; /* For topology */
990}; 996};
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 33d291888ba9..e3f005eae1f7 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -25,6 +25,7 @@
25enum afs_call_trace { 25enum afs_call_trace {
26 afs_call_trace_alloc, 26 afs_call_trace_alloc,
27 afs_call_trace_free, 27 afs_call_trace_free,
28 afs_call_trace_get,
28 afs_call_trace_put, 29 afs_call_trace_put,
29 afs_call_trace_wake, 30 afs_call_trace_wake,
30 afs_call_trace_work, 31 afs_call_trace_work,
@@ -159,6 +160,7 @@ enum afs_file_error {
159#define afs_call_traces \ 160#define afs_call_traces \
160 EM(afs_call_trace_alloc, "ALLOC") \ 161 EM(afs_call_trace_alloc, "ALLOC") \
161 EM(afs_call_trace_free, "FREE ") \ 162 EM(afs_call_trace_free, "FREE ") \
163 EM(afs_call_trace_get, "GET ") \
162 EM(afs_call_trace_put, "PUT ") \ 164 EM(afs_call_trace_put, "PUT ") \
163 EM(afs_call_trace_wake, "WAKE ") \ 165 EM(afs_call_trace_wake, "WAKE ") \
164 E_(afs_call_trace_work, "WORK ") 166 E_(afs_call_trace_work, "WORK ")
diff --git a/include/uapi/linux/android/binder_ctl.h b/include/uapi/linux/android/binderfs.h
index 65b2efd1a0a5..87410477aea9 100644
--- a/include/uapi/linux/android/binder_ctl.h
+++ b/include/uapi/linux/android/binderfs.h
@@ -4,8 +4,8 @@
4 * 4 *
5 */ 5 */
6 6
7#ifndef _UAPI_LINUX_BINDER_CTL_H 7#ifndef _UAPI_LINUX_BINDERFS_H
8#define _UAPI_LINUX_BINDER_CTL_H 8#define _UAPI_LINUX_BINDERFS_H
9 9
10#include <linux/android/binder.h> 10#include <linux/android/binder.h>
11#include <linux/types.h> 11#include <linux/types.h>
@@ -22,8 +22,8 @@
22 */ 22 */
23struct binderfs_device { 23struct binderfs_device {
24 char name[BINDERFS_MAX_NAME + 1]; 24 char name[BINDERFS_MAX_NAME + 1];
25 __u8 major; 25 __u32 major;
26 __u8 minor; 26 __u32 minor;
27}; 27};
28 28
29/** 29/**
@@ -31,5 +31,5 @@ struct binderfs_device {
31 */ 31 */
32#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device) 32#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)
33 33
34#endif /* _UAPI_LINUX_BINDER_CTL_H */ 34#endif /* _UAPI_LINUX_BINDERFS_H */
35 35
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 36a7e3f18e69..f28acd952d03 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -400,6 +400,8 @@ enum {
400/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */ 400/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */
401#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT) 401#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT)
402#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 402#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
403#define AUDIT_ARCH_RISCV32 (EM_RISCV|__AUDIT_ARCH_LE)
404#define AUDIT_ARCH_RISCV64 (EM_RISCV|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
403#define AUDIT_ARCH_S390 (EM_S390) 405#define AUDIT_ARCH_S390 (EM_S390)
404#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT) 406#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT)
405#define AUDIT_ARCH_SH (EM_SH) 407#define AUDIT_ARCH_SH (EM_SH)
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 6fa38d001d84..498eec813494 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -138,6 +138,7 @@ struct blk_zone_range {
138 * @BLKRESETZONE: Reset the write pointer of the zones in the specified 138 * @BLKRESETZONE: Reset the write pointer of the zones in the specified
139 * sector range. The sector range must be zone aligned. 139 * sector range. The sector range must be zone aligned.
140 * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors. 140 * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
141 * @BLKGETNRZONES: Get the total number of zones of the device.
141 */ 142 */
142#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report) 143#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
143#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range) 144#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index f6052e70bf40..a55cb8b10165 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -268,7 +268,7 @@ struct sockaddr_in {
268#define IN_MULTICAST(a) IN_CLASSD(a) 268#define IN_MULTICAST(a) IN_CLASSD(a)
269#define IN_MULTICAST_NET 0xe0000000 269#define IN_MULTICAST_NET 0xe0000000
270 270
271#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) 271#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
272#define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) 272#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
273 273
274#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) 274#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 14565d703291..e8baca85bac6 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -137,15 +137,21 @@ enum {
137 INET_DIAG_TCLASS, 137 INET_DIAG_TCLASS,
138 INET_DIAG_SKMEMINFO, 138 INET_DIAG_SKMEMINFO,
139 INET_DIAG_SHUTDOWN, 139 INET_DIAG_SHUTDOWN,
140 INET_DIAG_DCTCPINFO, 140
141 INET_DIAG_PROTOCOL, /* response attribute only */ 141 /*
142 * Next extenstions cannot be requested in struct inet_diag_req_v2:
143 * its field idiag_ext has only 8 bits.
144 */
145
146 INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
147 INET_DIAG_PROTOCOL, /* response attribute only */
142 INET_DIAG_SKV6ONLY, 148 INET_DIAG_SKV6ONLY,
143 INET_DIAG_LOCALS, 149 INET_DIAG_LOCALS,
144 INET_DIAG_PEERS, 150 INET_DIAG_PEERS,
145 INET_DIAG_PAD, 151 INET_DIAG_PAD,
146 INET_DIAG_MARK, 152 INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
147 INET_DIAG_BBRINFO, 153 INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
148 INET_DIAG_CLASS_ID, 154 INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
149 INET_DIAG_MD5SIG, 155 INET_DIAG_MD5SIG,
150 __INET_DIAG_MAX, 156 __INET_DIAG_MAX,
151}; 157};
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index fb78f6f500f3..f056b2a00d5c 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -26,13 +26,17 @@
26 */ 26 */
27 27
28struct input_event { 28struct input_event {
29#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL) 29#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
30 struct timeval time; 30 struct timeval time;
31#define input_event_sec time.tv_sec 31#define input_event_sec time.tv_sec
32#define input_event_usec time.tv_usec 32#define input_event_usec time.tv_usec
33#else 33#else
34 __kernel_ulong_t __sec; 34 __kernel_ulong_t __sec;
35#if defined(__sparc__) && defined(__arch64__)
36 unsigned int __usec;
37#else
35 __kernel_ulong_t __usec; 38 __kernel_ulong_t __usec;
39#endif
36#define input_event_sec __sec 40#define input_event_sec __sec
37#define input_event_usec __usec 41#define input_event_usec __usec
38#endif 42#endif
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index d73d83950265..1bc794ad957a 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -147,7 +147,7 @@ struct ptp_pin_desc {
147#define PTP_SYS_OFFSET_PRECISE \ 147#define PTP_SYS_OFFSET_PRECISE \
148 _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) 148 _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
149#define PTP_SYS_OFFSET_EXTENDED \ 149#define PTP_SYS_OFFSET_EXTENDED \
150 _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) 150 _IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
151 151
152struct ptp_extts_event { 152struct ptp_extts_event {
153 struct ptp_clock_time t; /* Time event occured. */ 153 struct ptp_clock_time t; /* Time event occured. */
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 1196e1c1d4f6..ff8e7dc9d4dd 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -79,6 +79,12 @@
79#define VIRTIO_F_RING_PACKED 34 79#define VIRTIO_F_RING_PACKED 34
80 80
81/* 81/*
82 * This feature indicates that memory accesses by the driver and the
83 * device are ordered in a way described by the platform.
84 */
85#define VIRTIO_F_ORDER_PLATFORM 36
86
87/*
82 * Does the device support Single Root I/O Virtualization? 88 * Does the device support Single Root I/O Virtualization?
83 */ 89 */
84#define VIRTIO_F_SR_IOV 37 90#define VIRTIO_F_SR_IOV 37
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 2414f8af26b3..4c4e24c291a5 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -213,14 +213,4 @@ struct vring_packed_desc {
213 __le16 flags; 213 __le16 flags;
214}; 214};
215 215
216struct vring_packed {
217 unsigned int num;
218
219 struct vring_packed_desc *desc;
220
221 struct vring_packed_desc_event *driver;
222
223 struct vring_packed_desc_event *device;
224};
225
226#endif /* _UAPI_LINUX_VIRTIO_RING_H */ 216#endif /* _UAPI_LINUX_VIRTIO_RING_H */
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index ef3c7ec793a7..eb76b38a00d4 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -52,6 +52,11 @@ struct hns_roce_ib_create_srq {
52 __aligned_u64 que_addr; 52 __aligned_u64 que_addr;
53}; 53};
54 54
55struct hns_roce_ib_create_srq_resp {
56 __u32 srqn;
57 __u32 reserved;
58};
59
55struct hns_roce_ib_create_qp { 60struct hns_roce_ib_create_qp {
56 __aligned_u64 buf_addr; 61 __aligned_u64 buf_addr;
57 __aligned_u64 db_addr; 62 __aligned_u64 db_addr;
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index d13fd490b66d..6e73f0274e41 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
78 PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, 78 PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
79 PVRDMA_WR_BIND_MW, 79 PVRDMA_WR_BIND_MW,
80 PVRDMA_WR_REG_SIG_MR, 80 PVRDMA_WR_REG_SIG_MR,
81 PVRDMA_WR_ERROR,
81}; 82};
82 83
83enum pvrdma_wc_status { 84enum pvrdma_wc_status {
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index 59a260712a56..2ca9164a79bf 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -1,17 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H 2#ifndef _XEN_ARM_PAGE_COHERENT_H
3#define _ASM_ARM_XEN_PAGE_COHERENT_H 3#define _XEN_ARM_PAGE_COHERENT_H
4
5#include <asm/page.h>
6#include <asm/dma-mapping.h>
7#include <linux/dma-mapping.h>
8
9static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
10{
11 if (dev && dev->archdata.dev_dma_ops)
12 return dev->archdata.dev_dma_ops;
13 return get_arch_dma_ops(NULL);
14}
15 4
16void __xen_dma_map_page(struct device *hwdev, struct page *page, 5void __xen_dma_map_page(struct device *hwdev, struct page *page,
17 dma_addr_t dev_addr, unsigned long offset, size_t size, 6 dma_addr_t dev_addr, unsigned long offset, size_t size,
@@ -21,87 +10,7 @@ void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
21 unsigned long attrs); 10 unsigned long attrs);
22void __xen_dma_sync_single_for_cpu(struct device *hwdev, 11void __xen_dma_sync_single_for_cpu(struct device *hwdev,
23 dma_addr_t handle, size_t size, enum dma_data_direction dir); 12 dma_addr_t handle, size_t size, enum dma_data_direction dir);
24
25void __xen_dma_sync_single_for_device(struct device *hwdev, 13void __xen_dma_sync_single_for_device(struct device *hwdev,
26 dma_addr_t handle, size_t size, enum dma_data_direction dir); 14 dma_addr_t handle, size_t size, enum dma_data_direction dir);
27 15
28static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 16#endif /* _XEN_ARM_PAGE_COHERENT_H */
29 dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
30{
31 return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
32}
33
34static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
35 void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
36{
37 xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
38}
39
40static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
41 dma_addr_t dev_addr, unsigned long offset, size_t size,
42 enum dma_data_direction dir, unsigned long attrs)
43{
44 unsigned long page_pfn = page_to_xen_pfn(page);
45 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
46 unsigned long compound_pages =
47 (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
48 bool local = (page_pfn <= dev_pfn) &&
49 (dev_pfn - page_pfn < compound_pages);
50
51 /*
52 * Dom0 is mapped 1:1, while the Linux page can span across
53 * multiple Xen pages, it's not possible for it to contain a
54 * mix of local and foreign Xen pages. So if the first xen_pfn
55 * == mfn the page is local otherwise it's a foreign page
56 * grant-mapped in dom0. If the page is local we can safely
57 * call the native dma_ops function, otherwise we call the xen
58 * specific function.
59 */
60 if (local)
61 xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
62 else
63 __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
64}
65
66static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
67 size_t size, enum dma_data_direction dir, unsigned long attrs)
68{
69 unsigned long pfn = PFN_DOWN(handle);
70 /*
71 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
72 * multiple Xen page, it's not possible to have a mix of local and
73 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
74 * foreign mfn will always return false. If the page is local we can
75 * safely call the native dma_ops function, otherwise we call the xen
76 * specific function.
77 */
78 if (pfn_valid(pfn)) {
79 if (xen_get_dma_ops(hwdev)->unmap_page)
80 xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
81 } else
82 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
83}
84
85static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
86 dma_addr_t handle, size_t size, enum dma_data_direction dir)
87{
88 unsigned long pfn = PFN_DOWN(handle);
89 if (pfn_valid(pfn)) {
90 if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
91 xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
92 } else
93 __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
94}
95
96static inline void xen_dma_sync_single_for_device(struct device *hwdev,
97 dma_addr_t handle, size_t size, enum dma_data_direction dir)
98{
99 unsigned long pfn = PFN_DOWN(handle);
100 if (pfn_valid(pfn)) {
101 if (xen_get_dma_ops(hwdev)->sync_single_for_device)
102 xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
103 } else
104 __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
105}
106
107#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/init/Kconfig b/init/Kconfig
index d47cb77a220e..c9386a365eea 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -512,6 +512,17 @@ config PSI_DEFAULT_DISABLED
512 per default but can be enabled through passing psi=1 on the 512 per default but can be enabled through passing psi=1 on the
513 kernel commandline during boot. 513 kernel commandline during boot.
514 514
515 This feature adds some code to the task wakeup and sleep
516 paths of the scheduler. The overhead is too low to affect
517 common scheduling-intense workloads in practice (such as
518 webservers, memcache), but it does show up in artificial
519 scheduler stress tests, such as hackbench.
520
521 If you are paranoid and not sure what the kernel will be
522 used for, say Y.
523
524 Say N if unsure.
525
515endmenu # "CPU/Task time and stats accounting" 526endmenu # "CPU/Task time and stats accounting"
516 527
517config CPU_ISOLATION 528config CPU_ISOLATION
@@ -825,7 +836,7 @@ config CGROUP_PIDS
825 PIDs controller is designed to stop this from happening. 836 PIDs controller is designed to stop this from happening.
826 837
827 It should be noted that organisational operations (such as attaching 838 It should be noted that organisational operations (such as attaching
828 to a cgroup hierarchy will *not* be blocked by the PIDs controller), 839 to a cgroup hierarchy) will *not* be blocked by the PIDs controller,
829 since the PIDs limit only affects a process's ability to fork, not to 840 since the PIDs limit only affects a process's ability to fork, not to
830 attach to a cgroup. 841 attach to a cgroup.
831 842
@@ -1124,6 +1135,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION
1124 bool "Dead code and data elimination (EXPERIMENTAL)" 1135 bool "Dead code and data elimination (EXPERIMENTAL)"
1125 depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION 1136 depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
1126 depends on EXPERT 1137 depends on EXPERT
1138 depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
1127 depends on $(cc-option,-ffunction-sections -fdata-sections) 1139 depends on $(cc-option,-ffunction-sections -fdata-sections)
1128 depends on $(ld-option,--gc-sections) 1140 depends on $(ld-option,--gc-sections)
1129 help 1141 help
diff --git a/init/initramfs.c b/init/initramfs.c
index 7cea802d00ef..fca899622937 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -550,6 +550,7 @@ skip:
550 initrd_end = 0; 550 initrd_end = 0;
551} 551}
552 552
553#ifdef CONFIG_BLK_DEV_RAM
553#define BUF_SIZE 1024 554#define BUF_SIZE 1024
554static void __init clean_rootfs(void) 555static void __init clean_rootfs(void)
555{ 556{
@@ -596,6 +597,7 @@ static void __init clean_rootfs(void)
596 ksys_close(fd); 597 ksys_close(fd);
597 kfree(buf); 598 kfree(buf);
598} 599}
600#endif
599 601
600static int __init populate_rootfs(void) 602static int __init populate_rootfs(void)
601{ 603{
@@ -638,10 +640,8 @@ static int __init populate_rootfs(void)
638 printk(KERN_INFO "Unpacking initramfs...\n"); 640 printk(KERN_INFO "Unpacking initramfs...\n");
639 err = unpack_to_rootfs((char *)initrd_start, 641 err = unpack_to_rootfs((char *)initrd_start,
640 initrd_end - initrd_start); 642 initrd_end - initrd_start);
641 if (err) { 643 if (err)
642 printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); 644 printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
643 clean_rootfs();
644 }
645 free_initrd(); 645 free_initrd();
646#endif 646#endif
647 } 647 }
diff --git a/init/main.c b/init/main.c
index e2e80ca3165a..c86a1c8f19f4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void)
695 initrd_start = 0; 695 initrd_start = 0;
696 } 696 }
697#endif 697#endif
698 page_ext_init();
699 kmemleak_init(); 698 kmemleak_init();
700 setup_per_cpu_pageset(); 699 setup_per_cpu_pageset();
701 numa_policy_init(); 700 numa_policy_init();
@@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void)
1131 sched_init_smp(); 1130 sched_init_smp();
1132 1131
1133 page_alloc_init_late(); 1132 page_alloc_init_late();
1133 /* Initialize page ext after all struct pages are initialized. */
1134 page_ext_init();
1134 1135
1135 do_basic_setup(); 1136 do_basic_setup();
1136 1137
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 715f9fcf4712..c57bd10340ed 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -467,7 +467,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
467 return kind_ops[BTF_INFO_KIND(t->info)]; 467 return kind_ops[BTF_INFO_KIND(t->info)];
468} 468}
469 469
470bool btf_name_offset_valid(const struct btf *btf, u32 offset) 470static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
471{ 471{
472 return BTF_STR_OFFSET_VALID(offset) && 472 return BTF_STR_OFFSET_VALID(offset) &&
473 offset < btf->hdr.str_len; 473 offset < btf->hdr.str_len;
@@ -1219,8 +1219,6 @@ static void btf_bitfield_seq_show(void *data, u8 bits_offset,
1219 u8 nr_copy_bits; 1219 u8 nr_copy_bits;
1220 u64 print_num; 1220 u64 print_num;
1221 1221
1222 data += BITS_ROUNDDOWN_BYTES(bits_offset);
1223 bits_offset = BITS_PER_BYTE_MASKED(bits_offset);
1224 nr_copy_bits = nr_bits + bits_offset; 1222 nr_copy_bits = nr_bits + bits_offset;
1225 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); 1223 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1226 1224
@@ -1255,7 +1253,9 @@ static void btf_int_bits_seq_show(const struct btf *btf,
1255 * BTF_INT_OFFSET() cannot exceed 64 bits. 1253 * BTF_INT_OFFSET() cannot exceed 64 bits.
1256 */ 1254 */
1257 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 1255 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1258 btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m); 1256 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1257 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1258 btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
1259} 1259}
1260 1260
1261static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, 1261static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
@@ -1459,7 +1459,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
1459 1459
1460 /* "typedef void new_void", "const void"...etc */ 1460 /* "typedef void new_void", "const void"...etc */
1461 if (!btf_type_is_void(next_type) && 1461 if (!btf_type_is_void(next_type) &&
1462 !btf_type_is_fwd(next_type)) { 1462 !btf_type_is_fwd(next_type) &&
1463 !btf_type_is_func_proto(next_type)) {
1463 btf_verifier_log_type(env, v->t, "Invalid type_id"); 1464 btf_verifier_log_type(env, v->t, "Invalid type_id");
1464 return -EINVAL; 1465 return -EINVAL;
1465 } 1466 }
@@ -2001,12 +2002,12 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2001 2002
2002 member_offset = btf_member_bit_offset(t, member); 2003 member_offset = btf_member_bit_offset(t, member);
2003 bitfield_size = btf_member_bitfield_size(t, member); 2004 bitfield_size = btf_member_bitfield_size(t, member);
2005 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2006 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2004 if (bitfield_size) { 2007 if (bitfield_size) {
2005 btf_bitfield_seq_show(data, member_offset, 2008 btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
2006 bitfield_size, m); 2009 bitfield_size, m);
2007 } else { 2010 } else {
2008 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2009 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2010 ops = btf_type_ops(member_type); 2011 ops = btf_type_ops(member_type);
2011 ops->seq_show(btf, member_type, member->type, 2012 ops->seq_show(btf, member_type, member->type,
2012 data + bytes_offset, bits8_offset, m); 2013 data + bytes_offset, bits8_offset, m);
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 9425c2fb872f..d17d05570a3f 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
572 bpf_compute_and_save_data_end(skb, &saved_data_end); 572 bpf_compute_and_save_data_end(skb, &saved_data_end);
573 573
574 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, 574 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
575 bpf_prog_run_save_cb); 575 __bpf_prog_run_save_cb);
576 bpf_restore_data_end(skb, saved_data_end); 576 bpf_restore_data_end(skb, saved_data_end);
577 __skb_pull(skb, offset); 577 __skb_pull(skb, offset);
578 skb->sk = save_sk; 578 skb->sk = save_sk;
@@ -718,6 +718,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
718 case BPF_FUNC_trace_printk: 718 case BPF_FUNC_trace_printk:
719 if (capable(CAP_SYS_ADMIN)) 719 if (capable(CAP_SYS_ADMIN))
720 return bpf_get_trace_printk_proto(); 720 return bpf_get_trace_printk_proto();
721 /* fall through */
721 default: 722 default:
722 return NULL; 723 return NULL;
723 } 724 }
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 4b7c76765d9d..f9274114c88d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
686 } 686 }
687 687
688 if (htab_is_prealloc(htab)) { 688 if (htab_is_prealloc(htab)) {
689 pcpu_freelist_push(&htab->freelist, &l->fnode); 689 __pcpu_freelist_push(&htab->freelist, &l->fnode);
690 } else { 690 } else {
691 atomic_dec(&htab->count); 691 atomic_dec(&htab->count);
692 l->htab = htab; 692 l->htab = htab;
@@ -748,7 +748,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
748 } else { 748 } else {
749 struct pcpu_freelist_node *l; 749 struct pcpu_freelist_node *l;
750 750
751 l = pcpu_freelist_pop(&htab->freelist); 751 l = __pcpu_freelist_pop(&htab->freelist);
752 if (!l) 752 if (!l)
753 return ERR_PTR(-E2BIG); 753 return ERR_PTR(-E2BIG);
754 l_new = container_of(l, struct htab_elem, fnode); 754 l_new = container_of(l, struct htab_elem, fnode);
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index abf1002080df..93a5cbbde421 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
471 } 471 }
472 472
473 if (!node || node->prefixlen != key->prefixlen || 473 if (!node || node->prefixlen != key->prefixlen ||
474 node->prefixlen != matchlen ||
474 (node->flags & LPM_TREE_NODE_FLAG_IM)) { 475 (node->flags & LPM_TREE_NODE_FLAG_IM)) {
475 ret = -ENOENT; 476 ret = -ENOENT;
476 goto out; 477 goto out;
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 99d243e1ad6e..52378d3e34b3 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -12,6 +12,7 @@
12struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) 12struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
13{ 13{
14 struct bpf_map *inner_map, *inner_map_meta; 14 struct bpf_map *inner_map, *inner_map_meta;
15 u32 inner_map_meta_size;
15 struct fd f; 16 struct fd f;
16 17
17 f = fdget(inner_map_ufd); 18 f = fdget(inner_map_ufd);
@@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
36 return ERR_PTR(-EINVAL); 37 return ERR_PTR(-EINVAL);
37 } 38 }
38 39
39 inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); 40 inner_map_meta_size = sizeof(*inner_map_meta);
41 /* In some cases verifier needs to access beyond just base map. */
42 if (inner_map->ops == &array_map_ops)
43 inner_map_meta_size = sizeof(struct bpf_array);
44
45 inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
40 if (!inner_map_meta) { 46 if (!inner_map_meta) {
41 fdput(f); 47 fdput(f);
42 return ERR_PTR(-ENOMEM); 48 return ERR_PTR(-ENOMEM);
@@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
46 inner_map_meta->key_size = inner_map->key_size; 52 inner_map_meta->key_size = inner_map->key_size;
47 inner_map_meta->value_size = inner_map->value_size; 53 inner_map_meta->value_size = inner_map->value_size;
48 inner_map_meta->map_flags = inner_map->map_flags; 54 inner_map_meta->map_flags = inner_map->map_flags;
49 inner_map_meta->ops = inner_map->ops;
50 inner_map_meta->max_entries = inner_map->max_entries; 55 inner_map_meta->max_entries = inner_map->max_entries;
51 56
57 /* Misc members not needed in bpf_map_meta_equal() check. */
58 inner_map_meta->ops = inner_map->ops;
59 if (inner_map->ops == &array_map_ops) {
60 inner_map_meta->unpriv_array = inner_map->unpriv_array;
61 container_of(inner_map_meta, struct bpf_array, map)->index_mask =
62 container_of(inner_map, struct bpf_array, map)->index_mask;
63 }
64
52 fdput(f); 65 fdput(f);
53 return inner_map_meta; 66 return inner_map_meta;
54} 67}
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 673fa6fe2d73..0c1b4ba9e90e 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
28 free_percpu(s->freelist); 28 free_percpu(s->freelist);
29} 29}
30 30
31static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, 31static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
32 struct pcpu_freelist_node *node) 32 struct pcpu_freelist_node *node)
33{ 33{
34 raw_spin_lock(&head->lock); 34 raw_spin_lock(&head->lock);
35 node->next = head->first; 35 node->next = head->first;
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
37 raw_spin_unlock(&head->lock); 37 raw_spin_unlock(&head->lock);
38} 38}
39 39
40void pcpu_freelist_push(struct pcpu_freelist *s, 40void __pcpu_freelist_push(struct pcpu_freelist *s,
41 struct pcpu_freelist_node *node) 41 struct pcpu_freelist_node *node)
42{ 42{
43 struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); 43 struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
44 44
45 __pcpu_freelist_push(head, node); 45 ___pcpu_freelist_push(head, node);
46}
47
48void pcpu_freelist_push(struct pcpu_freelist *s,
49 struct pcpu_freelist_node *node)
50{
51 unsigned long flags;
52
53 local_irq_save(flags);
54 __pcpu_freelist_push(s, node);
55 local_irq_restore(flags);
46} 56}
47 57
48void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, 58void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
63 for_each_possible_cpu(cpu) { 73 for_each_possible_cpu(cpu) {
64again: 74again:
65 head = per_cpu_ptr(s->freelist, cpu); 75 head = per_cpu_ptr(s->freelist, cpu);
66 __pcpu_freelist_push(head, buf); 76 ___pcpu_freelist_push(head, buf);
67 i++; 77 i++;
68 buf += elem_size; 78 buf += elem_size;
69 if (i == nr_elems) 79 if (i == nr_elems)
@@ -74,14 +84,12 @@ again:
74 local_irq_restore(flags); 84 local_irq_restore(flags);
75} 85}
76 86
77struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) 87struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
78{ 88{
79 struct pcpu_freelist_head *head; 89 struct pcpu_freelist_head *head;
80 struct pcpu_freelist_node *node; 90 struct pcpu_freelist_node *node;
81 unsigned long flags;
82 int orig_cpu, cpu; 91 int orig_cpu, cpu;
83 92
84 local_irq_save(flags);
85 orig_cpu = cpu = raw_smp_processor_id(); 93 orig_cpu = cpu = raw_smp_processor_id();
86 while (1) { 94 while (1) {
87 head = per_cpu_ptr(s->freelist, cpu); 95 head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
89 node = head->first; 97 node = head->first;
90 if (node) { 98 if (node) {
91 head->first = node->next; 99 head->first = node->next;
92 raw_spin_unlock_irqrestore(&head->lock, flags); 100 raw_spin_unlock(&head->lock);
93 return node; 101 return node;
94 } 102 }
95 raw_spin_unlock(&head->lock); 103 raw_spin_unlock(&head->lock);
96 cpu = cpumask_next(cpu, cpu_possible_mask); 104 cpu = cpumask_next(cpu, cpu_possible_mask);
97 if (cpu >= nr_cpu_ids) 105 if (cpu >= nr_cpu_ids)
98 cpu = 0; 106 cpu = 0;
99 if (cpu == orig_cpu) { 107 if (cpu == orig_cpu)
100 local_irq_restore(flags);
101 return NULL; 108 return NULL;
102 }
103 } 109 }
104} 110}
111
112struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
113{
114 struct pcpu_freelist_node *ret;
115 unsigned long flags;
116
117 local_irq_save(flags);
118 ret = __pcpu_freelist_pop(s);
119 local_irq_restore(flags);
120 return ret;
121}
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
index 3049aae8ea1e..c3960118e617 100644
--- a/kernel/bpf/percpu_freelist.h
+++ b/kernel/bpf/percpu_freelist.h
@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
22 struct pcpu_freelist_node *next; 22 struct pcpu_freelist_node *next;
23}; 23};
24 24
25/* pcpu_freelist_* do spin_lock_irqsave. */
25void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); 26void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
26struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); 27struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
28/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
29void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
30struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
27void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, 31void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
28 u32 nr_elems); 32 u32 nr_elems);
29int pcpu_freelist_init(struct pcpu_freelist *); 33int pcpu_freelist_init(struct pcpu_freelist *);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 90daf285de03..950ab2f28922 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
44 struct stack_map_irq_work *work; 44 struct stack_map_irq_work *work;
45 45
46 work = container_of(entry, struct stack_map_irq_work, irq_work); 46 work = container_of(entry, struct stack_map_irq_work, irq_work);
47 up_read(work->sem); 47 up_read_non_owner(work->sem);
48 work->sem = NULL; 48 work->sem = NULL;
49} 49}
50 50
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
180 180
181 if (nhdr->n_type == BPF_BUILD_ID && 181 if (nhdr->n_type == BPF_BUILD_ID &&
182 nhdr->n_namesz == sizeof("GNU") && 182 nhdr->n_namesz == sizeof("GNU") &&
183 nhdr->n_descsz == BPF_BUILD_ID_SIZE) { 183 nhdr->n_descsz > 0 &&
184 nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
184 memcpy(build_id, 185 memcpy(build_id,
185 note_start + note_offs + 186 note_start + note_offs +
186 ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), 187 ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
187 BPF_BUILD_ID_SIZE); 188 nhdr->n_descsz);
189 memset(build_id + nhdr->n_descsz, 0,
190 BPF_BUILD_ID_SIZE - nhdr->n_descsz);
188 return 0; 191 return 0;
189 } 192 }
190 new_offs = note_offs + sizeof(Elf32_Nhdr) + 193 new_offs = note_offs + sizeof(Elf32_Nhdr) +
@@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
260 return -EFAULT; /* page not mapped */ 263 return -EFAULT; /* page not mapped */
261 264
262 ret = -EINVAL; 265 ret = -EINVAL;
263 page_addr = page_address(page); 266 page_addr = kmap_atomic(page);
264 ehdr = (Elf32_Ehdr *)page_addr; 267 ehdr = (Elf32_Ehdr *)page_addr;
265 268
266 /* compare magic x7f "ELF" */ 269 /* compare magic x7f "ELF" */
@@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
276 else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) 279 else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
277 ret = stack_map_get_build_id_64(page_addr, build_id); 280 ret = stack_map_get_build_id_64(page_addr, build_id);
278out: 281out:
282 kunmap_atomic(page_addr);
279 put_page(page); 283 put_page(page);
280 return ret; 284 return ret;
281} 285}
@@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
310 for (i = 0; i < trace_nr; i++) { 314 for (i = 0; i < trace_nr; i++) {
311 id_offs[i].status = BPF_STACK_BUILD_ID_IP; 315 id_offs[i].status = BPF_STACK_BUILD_ID_IP;
312 id_offs[i].ip = ips[i]; 316 id_offs[i].ip = ips[i];
317 memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
313 } 318 }
314 return; 319 return;
315 } 320 }
@@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
320 /* per entry fall back to ips */ 325 /* per entry fall back to ips */
321 id_offs[i].status = BPF_STACK_BUILD_ID_IP; 326 id_offs[i].status = BPF_STACK_BUILD_ID_IP;
322 id_offs[i].ip = ips[i]; 327 id_offs[i].ip = ips[i];
328 memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
323 continue; 329 continue;
324 } 330 }
325 id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] 331 id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
@@ -332,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
332 } else { 338 } else {
333 work->sem = &current->mm->mmap_sem; 339 work->sem = &current->mm->mmap_sem;
334 irq_work_queue(&work->irq_work); 340 irq_work_queue(&work->irq_work);
341 /*
342 * The irq_work will release the mmap_sem with
343 * up_read_non_owner(). The rwsem_release() is called
344 * here to release the lock from lockdep's perspective.
345 */
346 rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
335 } 347 }
336} 348}
337 349
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index b155cd17c1bd..8577bb7f8be6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -713,8 +713,13 @@ static int map_lookup_elem(union bpf_attr *attr)
713 713
714 if (bpf_map_is_dev_bound(map)) { 714 if (bpf_map_is_dev_bound(map)) {
715 err = bpf_map_offload_lookup_elem(map, key, value); 715 err = bpf_map_offload_lookup_elem(map, key, value);
716 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 716 goto done;
717 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 717 }
718
719 preempt_disable();
720 this_cpu_inc(bpf_prog_active);
721 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
722 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
718 err = bpf_percpu_hash_copy(map, key, value); 723 err = bpf_percpu_hash_copy(map, key, value);
719 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 724 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
720 err = bpf_percpu_array_copy(map, key, value); 725 err = bpf_percpu_array_copy(map, key, value);
@@ -744,7 +749,10 @@ static int map_lookup_elem(union bpf_attr *attr)
744 } 749 }
745 rcu_read_unlock(); 750 rcu_read_unlock();
746 } 751 }
752 this_cpu_dec(bpf_prog_active);
753 preempt_enable();
747 754
755done:
748 if (err) 756 if (err)
749 goto free_value; 757 goto free_value;
750 758
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f6bc62a9ee8e..8f295b790297 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
1617 return 0; 1617 return 0;
1618} 1618}
1619 1619
1620static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, 1620static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
1621 int size, enum bpf_access_type t) 1621 u32 regno, int off, int size,
1622 enum bpf_access_type t)
1622{ 1623{
1623 struct bpf_reg_state *regs = cur_regs(env); 1624 struct bpf_reg_state *regs = cur_regs(env);
1624 struct bpf_reg_state *reg = &regs[regno]; 1625 struct bpf_reg_state *reg = &regs[regno];
1625 struct bpf_insn_access_aux info; 1626 struct bpf_insn_access_aux info = {};
1626 1627
1627 if (reg->smin_value < 0) { 1628 if (reg->smin_value < 0) {
1628 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 1629 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
@@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
1636 return -EACCES; 1637 return -EACCES;
1637 } 1638 }
1638 1639
1640 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
1641
1639 return 0; 1642 return 0;
1640} 1643}
1641 1644
@@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
2032 verbose(env, "cannot write into socket\n"); 2035 verbose(env, "cannot write into socket\n");
2033 return -EACCES; 2036 return -EACCES;
2034 } 2037 }
2035 err = check_sock_access(env, regno, off, size, t); 2038 err = check_sock_access(env, insn_idx, regno, off, size, t);
2036 if (!err && value_regno >= 0) 2039 if (!err && value_regno >= 0)
2037 mark_reg_unknown(env, regs, value_regno); 2040 mark_reg_unknown(env, regs, value_regno);
2038 } else { 2041 } else {
@@ -3103,6 +3106,40 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
3103 } 3106 }
3104} 3107}
3105 3108
3109static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
3110 const struct bpf_insn *insn)
3111{
3112 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
3113}
3114
3115static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
3116 u32 alu_state, u32 alu_limit)
3117{
3118 /* If we arrived here from different branches with different
3119 * state or limits to sanitize, then this won't work.
3120 */
3121 if (aux->alu_state &&
3122 (aux->alu_state != alu_state ||
3123 aux->alu_limit != alu_limit))
3124 return -EACCES;
3125
3126 /* Corresponding fixup done in fixup_bpf_calls(). */
3127 aux->alu_state = alu_state;
3128 aux->alu_limit = alu_limit;
3129 return 0;
3130}
3131
3132static int sanitize_val_alu(struct bpf_verifier_env *env,
3133 struct bpf_insn *insn)
3134{
3135 struct bpf_insn_aux_data *aux = cur_aux(env);
3136
3137 if (can_skip_alu_sanitation(env, insn))
3138 return 0;
3139
3140 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
3141}
3142
3106static int sanitize_ptr_alu(struct bpf_verifier_env *env, 3143static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3107 struct bpf_insn *insn, 3144 struct bpf_insn *insn,
3108 const struct bpf_reg_state *ptr_reg, 3145 const struct bpf_reg_state *ptr_reg,
@@ -3117,7 +3154,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3117 struct bpf_reg_state tmp; 3154 struct bpf_reg_state tmp;
3118 bool ret; 3155 bool ret;
3119 3156
3120 if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K) 3157 if (can_skip_alu_sanitation(env, insn))
3121 return 0; 3158 return 0;
3122 3159
3123 /* We already marked aux for masking from non-speculative 3160 /* We already marked aux for masking from non-speculative
@@ -3133,19 +3170,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3133 3170
3134 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) 3171 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
3135 return 0; 3172 return 0;
3136 3173 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
3137 /* If we arrived here from different branches with different
3138 * limits to sanitize, then this won't work.
3139 */
3140 if (aux->alu_state &&
3141 (aux->alu_state != alu_state ||
3142 aux->alu_limit != alu_limit))
3143 return -EACCES; 3174 return -EACCES;
3144
3145 /* Corresponding fixup done in fixup_bpf_calls(). */
3146 aux->alu_state = alu_state;
3147 aux->alu_limit = alu_limit;
3148
3149do_sim: 3175do_sim:
3150 /* Simulate and find potential out-of-bounds access under 3176 /* Simulate and find potential out-of-bounds access under
3151 * speculative execution from truncation as a result of 3177 * speculative execution from truncation as a result of
@@ -3418,6 +3444,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3418 s64 smin_val, smax_val; 3444 s64 smin_val, smax_val;
3419 u64 umin_val, umax_val; 3445 u64 umin_val, umax_val;
3420 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; 3446 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3447 u32 dst = insn->dst_reg;
3448 int ret;
3421 3449
3422 if (insn_bitness == 32) { 3450 if (insn_bitness == 32) {
3423 /* Relevant for 32-bit RSH: Information can propagate towards 3451 /* Relevant for 32-bit RSH: Information can propagate towards
@@ -3452,6 +3480,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3452 3480
3453 switch (opcode) { 3481 switch (opcode) {
3454 case BPF_ADD: 3482 case BPF_ADD:
3483 ret = sanitize_val_alu(env, insn);
3484 if (ret < 0) {
3485 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
3486 return ret;
3487 }
3455 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 3488 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
3456 signed_add_overflows(dst_reg->smax_value, smax_val)) { 3489 signed_add_overflows(dst_reg->smax_value, smax_val)) {
3457 dst_reg->smin_value = S64_MIN; 3490 dst_reg->smin_value = S64_MIN;
@@ -3471,6 +3504,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3471 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 3504 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
3472 break; 3505 break;
3473 case BPF_SUB: 3506 case BPF_SUB:
3507 ret = sanitize_val_alu(env, insn);
3508 if (ret < 0) {
3509 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
3510 return ret;
3511 }
3474 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 3512 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
3475 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 3513 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
3476 /* Overflow possible, we know nothing */ 3514 /* Overflow possible, we know nothing */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 91d5c38eb7e5..d1c6d152da89 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -376,9 +376,6 @@ void __weak arch_smt_update(void) { }
376 376
377#ifdef CONFIG_HOTPLUG_SMT 377#ifdef CONFIG_HOTPLUG_SMT
378enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; 378enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
379EXPORT_SYMBOL_GPL(cpu_smt_control);
380
381static bool cpu_smt_available __read_mostly;
382 379
383void __init cpu_smt_disable(bool force) 380void __init cpu_smt_disable(bool force)
384{ 381{
@@ -397,25 +394,11 @@ void __init cpu_smt_disable(bool force)
397 394
398/* 395/*
399 * The decision whether SMT is supported can only be done after the full 396 * The decision whether SMT is supported can only be done after the full
400 * CPU identification. Called from architecture code before non boot CPUs 397 * CPU identification. Called from architecture code.
401 * are brought up.
402 */
403void __init cpu_smt_check_topology_early(void)
404{
405 if (!topology_smt_supported())
406 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
407}
408
409/*
410 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
411 * brought online. This ensures the smt/l1tf sysfs entries are consistent
412 * with reality. cpu_smt_available is set to true during the bringup of non
413 * boot CPUs when a SMT sibling is detected. Note, this may overwrite
414 * cpu_smt_control's previous setting.
415 */ 398 */
416void __init cpu_smt_check_topology(void) 399void __init cpu_smt_check_topology(void)
417{ 400{
418 if (!cpu_smt_available) 401 if (!topology_smt_supported())
419 cpu_smt_control = CPU_SMT_NOT_SUPPORTED; 402 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
420} 403}
421 404
@@ -428,18 +411,10 @@ early_param("nosmt", smt_cmdline_disable);
428 411
429static inline bool cpu_smt_allowed(unsigned int cpu) 412static inline bool cpu_smt_allowed(unsigned int cpu)
430{ 413{
431 if (topology_is_primary_thread(cpu)) 414 if (cpu_smt_control == CPU_SMT_ENABLED)
432 return true; 415 return true;
433 416
434 /* 417 if (topology_is_primary_thread(cpu))
435 * If the CPU is not a 'primary' thread and the booted_once bit is
436 * set then the processor has SMT support. Store this information
437 * for the late check of SMT support in cpu_smt_check_topology().
438 */
439 if (per_cpu(cpuhp_state, cpu).booted_once)
440 cpu_smt_available = true;
441
442 if (cpu_smt_control == CPU_SMT_ENABLED)
443 return true; 418 return true;
444 419
445 /* 420 /*
@@ -2090,10 +2065,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2090 */ 2065 */
2091 cpuhp_offline_cpu_device(cpu); 2066 cpuhp_offline_cpu_device(cpu);
2092 } 2067 }
2093 if (!ret) { 2068 if (!ret)
2094 cpu_smt_control = ctrlval; 2069 cpu_smt_control = ctrlval;
2095 arch_smt_update();
2096 }
2097 cpu_maps_update_done(); 2070 cpu_maps_update_done();
2098 return ret; 2071 return ret;
2099} 2072}
@@ -2104,7 +2077,6 @@ static int cpuhp_smt_enable(void)
2104 2077
2105 cpu_maps_update_begin(); 2078 cpu_maps_update_begin();
2106 cpu_smt_control = CPU_SMT_ENABLED; 2079 cpu_smt_control = CPU_SMT_ENABLED;
2107 arch_smt_update();
2108 for_each_present_cpu(cpu) { 2080 for_each_present_cpu(cpu) {
2109 /* Skip online CPUs and CPUs on offline nodes */ 2081 /* Skip online CPUs and CPUs on offline nodes */
2110 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) 2082 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d6361776dc5c..1fb6fd68b9c7 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -378,6 +378,8 @@ void __init swiotlb_exit(void)
378 memblock_free_late(io_tlb_start, 378 memblock_free_late(io_tlb_start,
379 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); 379 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
380 } 380 }
381 io_tlb_start = 0;
382 io_tlb_end = 0;
381 io_tlb_nslabs = 0; 383 io_tlb_nslabs = 0;
382 max_segment = 0; 384 max_segment = 0;
383} 385}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3cd13a30f732..26d6edab051a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
436 void __user *buffer, size_t *lenp, 436 void __user *buffer, size_t *lenp,
437 loff_t *ppos) 437 loff_t *ppos)
438{ 438{
439 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 439 int ret;
440 440 int perf_cpu = sysctl_perf_cpu_time_max_percent;
441 if (ret || !write)
442 return ret;
443
444 /* 441 /*
445 * If throttling is disabled don't allow the write: 442 * If throttling is disabled don't allow the write:
446 */ 443 */
447 if (sysctl_perf_cpu_time_max_percent == 100 || 444 if (write && (perf_cpu == 100 || perf_cpu == 0))
448 sysctl_perf_cpu_time_max_percent == 0)
449 return -EINVAL; 445 return -EINVAL;
450 446
447 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
448 if (ret || !write)
449 return ret;
450
451 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); 451 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
452 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 452 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
453 update_perf_cpu_limits(); 453 update_perf_cpu_limits();
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
4963 } 4963 }
4964} 4964}
4965 4965
4966static int perf_event_check_period(struct perf_event *event, u64 value)
4967{
4968 return event->pmu->check_period(event, value);
4969}
4970
4966static int perf_event_period(struct perf_event *event, u64 __user *arg) 4971static int perf_event_period(struct perf_event *event, u64 __user *arg)
4967{ 4972{
4968 u64 value; 4973 u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
4979 if (event->attr.freq && value > sysctl_perf_event_sample_rate) 4984 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4980 return -EINVAL; 4985 return -EINVAL;
4981 4986
4987 if (perf_event_check_period(event, value))
4988 return -EINVAL;
4989
4982 event_function_call(event, __perf_event_period, &value); 4990 event_function_call(event, __perf_event_period, &value);
4983 4991
4984 return 0; 4992 return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
9391 return 0; 9399 return 0;
9392} 9400}
9393 9401
9402static int perf_event_nop_int(struct perf_event *event, u64 value)
9403{
9404 return 0;
9405}
9406
9394static DEFINE_PER_CPU(unsigned int, nop_txn_flags); 9407static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
9395 9408
9396static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) 9409static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
9691 pmu->pmu_disable = perf_pmu_nop_void; 9704 pmu->pmu_disable = perf_pmu_nop_void;
9692 } 9705 }
9693 9706
9707 if (!pmu->check_period)
9708 pmu->check_period = perf_event_nop_int;
9709
9694 if (!pmu->event_idx) 9710 if (!pmu->event_idx)
9695 pmu->event_idx = perf_event_idx_default; 9711 pmu->event_idx = perf_event_idx_default;
9696 9712
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 4a9937076331..5ab4fe3b1dcc 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -734,6 +734,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
734 size = sizeof(struct ring_buffer); 734 size = sizeof(struct ring_buffer);
735 size += nr_pages * sizeof(void *); 735 size += nr_pages * sizeof(void *);
736 736
737 if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
738 goto fail;
739
737 rb = kzalloc(size, GFP_KERNEL); 740 rb = kzalloc(size, GFP_KERNEL);
738 if (!rb) 741 if (!rb)
739 goto fail; 742 goto fail;
diff --git a/kernel/exit.c b/kernel/exit.c
index 2d14979577ee..2639a30a8aa5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -307,7 +307,7 @@ void rcuwait_wake_up(struct rcuwait *w)
307 * MB (A) MB (B) 307 * MB (A) MB (B)
308 * [L] cond [L] tsk 308 * [L] cond [L] tsk
309 */ 309 */
310 smp_rmb(); /* (B) */ 310 smp_mb(); /* (B) */
311 311
312 /* 312 /*
313 * Avoid using task_rcu_dereference() magic as long as we are careful, 313 * Avoid using task_rcu_dereference() magic as long as we are careful,
@@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
558 return NULL; 558 return NULL;
559} 559}
560 560
561static struct task_struct *find_child_reaper(struct task_struct *father) 561static struct task_struct *find_child_reaper(struct task_struct *father,
562 struct list_head *dead)
562 __releases(&tasklist_lock) 563 __releases(&tasklist_lock)
563 __acquires(&tasklist_lock) 564 __acquires(&tasklist_lock)
564{ 565{
565 struct pid_namespace *pid_ns = task_active_pid_ns(father); 566 struct pid_namespace *pid_ns = task_active_pid_ns(father);
566 struct task_struct *reaper = pid_ns->child_reaper; 567 struct task_struct *reaper = pid_ns->child_reaper;
568 struct task_struct *p, *n;
567 569
568 if (likely(reaper != father)) 570 if (likely(reaper != father))
569 return reaper; 571 return reaper;
@@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
579 panic("Attempted to kill init! exitcode=0x%08x\n", 581 panic("Attempted to kill init! exitcode=0x%08x\n",
580 father->signal->group_exit_code ?: father->exit_code); 582 father->signal->group_exit_code ?: father->exit_code);
581 } 583 }
584
585 list_for_each_entry_safe(p, n, dead, ptrace_entry) {
586 list_del_init(&p->ptrace_entry);
587 release_task(p);
588 }
589
582 zap_pid_ns_processes(pid_ns); 590 zap_pid_ns_processes(pid_ns);
583 write_lock_irq(&tasklist_lock); 591 write_lock_irq(&tasklist_lock);
584 592
@@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father,
668 exit_ptrace(father, dead); 676 exit_ptrace(father, dead);
669 677
670 /* Can drop and reacquire tasklist_lock */ 678 /* Can drop and reacquire tasklist_lock */
671 reaper = find_child_reaper(father); 679 reaper = find_child_reaper(father, dead);
672 if (list_empty(&father->children)) 680 if (list_empty(&father->children))
673 return; 681 return;
674 682
@@ -866,6 +874,7 @@ void __noreturn do_exit(long code)
866 exit_task_namespaces(tsk); 874 exit_task_namespaces(tsk);
867 exit_task_work(tsk); 875 exit_task_work(tsk);
868 exit_thread(tsk); 876 exit_thread(tsk);
877 exit_umh(tsk);
869 878
870 /* 879 /*
871 * Flush inherited counters to the parent - before the parent 880 * Flush inherited counters to the parent - before the parent
diff --git a/kernel/fork.c b/kernel/fork.c
index a60459947f18..b69248e6f0e0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -217,6 +217,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
217 memset(s->addr, 0, THREAD_SIZE); 217 memset(s->addr, 0, THREAD_SIZE);
218 218
219 tsk->stack_vm_area = s; 219 tsk->stack_vm_area = s;
220 tsk->stack = s->addr;
220 return s->addr; 221 return s->addr;
221 } 222 }
222 223
@@ -1833,8 +1834,6 @@ static __latent_entropy struct task_struct *copy_process(
1833 1834
1834 posix_cpu_timers_init(p); 1835 posix_cpu_timers_init(p);
1835 1836
1836 p->start_time = ktime_get_ns();
1837 p->real_start_time = ktime_get_boot_ns();
1838 p->io_context = NULL; 1837 p->io_context = NULL;
1839 audit_set_context(p, NULL); 1838 audit_set_context(p, NULL);
1840 cgroup_fork(p); 1839 cgroup_fork(p);
@@ -2001,6 +2000,17 @@ static __latent_entropy struct task_struct *copy_process(
2001 goto bad_fork_free_pid; 2000 goto bad_fork_free_pid;
2002 2001
2003 /* 2002 /*
2003 * From this point on we must avoid any synchronous user-space
2004 * communication until we take the tasklist-lock. In particular, we do
2005 * not want user-space to be able to predict the process start-time by
2006 * stalling fork(2) after we recorded the start_time but before it is
2007 * visible to the system.
2008 */
2009
2010 p->start_time = ktime_get_ns();
2011 p->real_start_time = ktime_get_boot_ns();
2012
2013 /*
2004 * Make it visible to the rest of the system, but dont wake it up yet. 2014 * Make it visible to the rest of the system, but dont wake it up yet.
2005 * Need tasklist lock for parent etc handling! 2015 * Need tasklist lock for parent etc handling!
2006 */ 2016 */
diff --git a/kernel/futex.c b/kernel/futex.c
index be3bff2315ff..a0514e01c3eb 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1452,11 +1452,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1452 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) 1452 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1453 return; 1453 return;
1454 1454
1455 /* 1455 get_task_struct(p);
1456 * Queue the task for later wakeup for after we've released
1457 * the hb->lock. wake_q_add() grabs reference to p.
1458 */
1459 wake_q_add(wake_q, p);
1460 __unqueue_futex(q); 1456 __unqueue_futex(q);
1461 /* 1457 /*
1462 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL 1458 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
@@ -1466,6 +1462,13 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1466 * plist_del in __unqueue_futex(). 1462 * plist_del in __unqueue_futex().
1467 */ 1463 */
1468 smp_store_release(&q->lock_ptr, NULL); 1464 smp_store_release(&q->lock_ptr, NULL);
1465
1466 /*
1467 * Queue the task for later wakeup for after we've released
1468 * the hb->lock. wake_q_add() grabs reference to p.
1469 */
1470 wake_q_add(wake_q, p);
1471 put_task_struct(p);
1469} 1472}
1470 1473
1471/* 1474/*
@@ -2218,11 +2221,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
2218 * decrement the counter at queue_unlock() when some error has 2221 * decrement the counter at queue_unlock() when some error has
2219 * occurred and we don't end up adding the task to the list. 2222 * occurred and we don't end up adding the task to the list.
2220 */ 2223 */
2221 hb_waiters_inc(hb); 2224 hb_waiters_inc(hb); /* implies smp_mb(); (A) */
2222 2225
2223 q->lock_ptr = &hb->lock; 2226 q->lock_ptr = &hb->lock;
2224 2227
2225 spin_lock(&hb->lock); /* implies smp_mb(); (A) */ 2228 spin_lock(&hb->lock);
2226 return hb; 2229 return hb;
2227} 2230}
2228 2231
@@ -2858,35 +2861,39 @@ retry_private:
2858 * and BUG when futex_unlock_pi() interleaves with this. 2861 * and BUG when futex_unlock_pi() interleaves with this.
2859 * 2862 *
2860 * Therefore acquire wait_lock while holding hb->lock, but drop the 2863 * Therefore acquire wait_lock while holding hb->lock, but drop the
2861 * latter before calling rt_mutex_start_proxy_lock(). This still fully 2864 * latter before calling __rt_mutex_start_proxy_lock(). This
2862 * serializes against futex_unlock_pi() as that does the exact same 2865 * interleaves with futex_unlock_pi() -- which does a similar lock
2863 * lock handoff sequence. 2866 * handoff -- such that the latter can observe the futex_q::pi_state
2867 * before __rt_mutex_start_proxy_lock() is done.
2864 */ 2868 */
2865 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); 2869 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
2866 spin_unlock(q.lock_ptr); 2870 spin_unlock(q.lock_ptr);
2871 /*
2872 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
2873 * such that futex_unlock_pi() is guaranteed to observe the waiter when
2874 * it sees the futex_q::pi_state.
2875 */
2867 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); 2876 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
2868 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); 2877 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
2869 2878
2870 if (ret) { 2879 if (ret) {
2871 if (ret == 1) 2880 if (ret == 1)
2872 ret = 0; 2881 ret = 0;
2873 2882 goto cleanup;
2874 spin_lock(q.lock_ptr);
2875 goto no_block;
2876 } 2883 }
2877 2884
2878
2879 if (unlikely(to)) 2885 if (unlikely(to))
2880 hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); 2886 hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
2881 2887
2882 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); 2888 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
2883 2889
2890cleanup:
2884 spin_lock(q.lock_ptr); 2891 spin_lock(q.lock_ptr);
2885 /* 2892 /*
2886 * If we failed to acquire the lock (signal/timeout), we must 2893 * If we failed to acquire the lock (deadlock/signal/timeout), we must
2887 * first acquire the hb->lock before removing the lock from the 2894 * first acquire the hb->lock before removing the lock from the
2888 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex 2895 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
2889 * wait lists consistent. 2896 * lists consistent.
2890 * 2897 *
2891 * In particular; it is important that futex_unlock_pi() can not 2898 * In particular; it is important that futex_unlock_pi() can not
2892 * observe this inconsistency. 2899 * observe this inconsistency.
@@ -3010,6 +3017,10 @@ retry:
3010 * there is no point where we hold neither; and therefore 3017 * there is no point where we hold neither; and therefore
3011 * wake_futex_pi() must observe a state consistent with what we 3018 * wake_futex_pi() must observe a state consistent with what we
3012 * observed. 3019 * observed.
3020 *
3021 * In particular; this forces __rt_mutex_start_proxy() to
3022 * complete such that we're guaranteed to observe the
3023 * rt_waiter. Also see the WARN in wake_futex_pi().
3013 */ 3024 */
3014 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); 3025 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3015 spin_unlock(&hb->lock); 3026 spin_unlock(&hb->lock);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index ee062b7939d3..ef8ad36cadcf 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -457,7 +457,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
457 457
458 /* Validate affinity mask(s) */ 458 /* Validate affinity mask(s) */
459 if (affinity) { 459 if (affinity) {
460 for (i = 0; i < cnt; i++, i++) { 460 for (i = 0; i < cnt; i++) {
461 if (cpumask_empty(&affinity[i].mask)) 461 if (cpumask_empty(&affinity[i].mask))
462 return -EINVAL; 462 return -EINVAL;
463 } 463 }
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a4888ce4667a..84b54a17b95d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -393,6 +393,9 @@ int irq_setup_affinity(struct irq_desc *desc)
393 } 393 }
394 394
395 cpumask_and(&mask, cpu_online_mask, set); 395 cpumask_and(&mask, cpu_online_mask, set);
396 if (cpumask_empty(&mask))
397 cpumask_copy(&mask, cpu_online_mask);
398
396 if (node != NUMA_NO_NODE) { 399 if (node != NUMA_NO_NODE) {
397 const struct cpumask *nodemask = cpumask_of_node(node); 400 const struct cpumask *nodemask = cpumask_of_node(node);
398 401
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 581edcc63c26..978d63a8261c 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
1726 rt_mutex_set_owner(lock, NULL); 1726 rt_mutex_set_owner(lock, NULL);
1727} 1727}
1728 1728
1729/**
1730 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1731 * @lock: the rt_mutex to take
1732 * @waiter: the pre-initialized rt_mutex_waiter
1733 * @task: the task to prepare
1734 *
1735 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
1736 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
1737 *
1738 * NOTE: does _NOT_ remove the @waiter on failure; must either call
1739 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
1740 *
1741 * Returns:
1742 * 0 - task blocked on lock
1743 * 1 - acquired the lock for task, caller should wake it up
1744 * <0 - error
1745 *
1746 * Special API call for PI-futex support.
1747 */
1729int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, 1748int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1730 struct rt_mutex_waiter *waiter, 1749 struct rt_mutex_waiter *waiter,
1731 struct task_struct *task) 1750 struct task_struct *task)
1732{ 1751{
1733 int ret; 1752 int ret;
1734 1753
1754 lockdep_assert_held(&lock->wait_lock);
1755
1735 if (try_to_take_rt_mutex(lock, task, NULL)) 1756 if (try_to_take_rt_mutex(lock, task, NULL))
1736 return 1; 1757 return 1;
1737 1758
@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1749 ret = 0; 1770 ret = 0;
1750 } 1771 }
1751 1772
1752 if (unlikely(ret))
1753 remove_waiter(lock, waiter);
1754
1755 debug_rt_mutex_print_deadlock(waiter); 1773 debug_rt_mutex_print_deadlock(waiter);
1756 1774
1757 return ret; 1775 return ret;
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1763 * @waiter: the pre-initialized rt_mutex_waiter 1781 * @waiter: the pre-initialized rt_mutex_waiter
1764 * @task: the task to prepare 1782 * @task: the task to prepare
1765 * 1783 *
1784 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
1785 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
1786 *
1787 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
1788 * on failure.
1789 *
1766 * Returns: 1790 * Returns:
1767 * 0 - task blocked on lock 1791 * 0 - task blocked on lock
1768 * 1 - acquired the lock for task, caller should wake it up 1792 * 1 - acquired the lock for task, caller should wake it up
1769 * <0 - error 1793 * <0 - error
1770 * 1794 *
1771 * Special API call for FUTEX_REQUEUE_PI support. 1795 * Special API call for PI-futex support.
1772 */ 1796 */
1773int rt_mutex_start_proxy_lock(struct rt_mutex *lock, 1797int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1774 struct rt_mutex_waiter *waiter, 1798 struct rt_mutex_waiter *waiter,
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1778 1802
1779 raw_spin_lock_irq(&lock->wait_lock); 1803 raw_spin_lock_irq(&lock->wait_lock);
1780 ret = __rt_mutex_start_proxy_lock(lock, waiter, task); 1804 ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
1805 if (unlikely(ret))
1806 remove_waiter(lock, waiter);
1781 raw_spin_unlock_irq(&lock->wait_lock); 1807 raw_spin_unlock_irq(&lock->wait_lock);
1782 1808
1783 return ret; 1809 return ret;
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
1845 * @lock: the rt_mutex we were woken on 1871 * @lock: the rt_mutex we were woken on
1846 * @waiter: the pre-initialized rt_mutex_waiter 1872 * @waiter: the pre-initialized rt_mutex_waiter
1847 * 1873 *
1848 * Attempt to clean up after a failed rt_mutex_wait_proxy_lock(). 1874 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
1875 * rt_mutex_wait_proxy_lock().
1849 * 1876 *
1850 * Unless we acquired the lock; we're still enqueued on the wait-list and can 1877 * Unless we acquired the lock; we're still enqueued on the wait-list and can
1851 * in fact still be granted ownership until we're removed. Therefore we can 1878 * in fact still be granted ownership until we're removed. Therefore we can
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 09b180063ee1..50d9af615dc4 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -198,15 +198,22 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
198 woken++; 198 woken++;
199 tsk = waiter->task; 199 tsk = waiter->task;
200 200
201 wake_q_add(wake_q, tsk); 201 get_task_struct(tsk);
202 list_del(&waiter->list); 202 list_del(&waiter->list);
203 /* 203 /*
204 * Ensure that the last operation is setting the reader 204 * Ensure calling get_task_struct() before setting the reader
205 * waiter to nil such that rwsem_down_read_failed() cannot 205 * waiter to nil such that rwsem_down_read_failed() cannot
206 * race with do_exit() by always holding a reference count 206 * race with do_exit() by always holding a reference count
207 * to the task to wakeup. 207 * to the task to wakeup.
208 */ 208 */
209 smp_store_release(&waiter->task, NULL); 209 smp_store_release(&waiter->task, NULL);
210 /*
211 * Ensure issuing the wakeup (either by us or someone else)
212 * after setting the reader waiter to nil.
213 */
214 wake_q_add(wake_q, tsk);
215 /* wake_q_add() already take the task ref */
216 put_task_struct(tsk);
210 } 217 }
211 218
212 adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; 219 adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
diff --git a/kernel/relay.c b/kernel/relay.c
index 04f248644e06..9e0f52375487 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
428 dentry = chan->cb->create_buf_file(tmpname, chan->parent, 428 dentry = chan->cb->create_buf_file(tmpname, chan->parent,
429 S_IRUSR, buf, 429 S_IRUSR, buf,
430 &chan->is_global); 430 &chan->is_global);
431 if (IS_ERR(dentry))
432 dentry = NULL;
431 433
432 kfree(tmpname); 434 kfree(tmpname);
433 435
@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
461 dentry = chan->cb->create_buf_file(NULL, NULL, 463 dentry = chan->cb->create_buf_file(NULL, NULL,
462 S_IRUSR, buf, 464 S_IRUSR, buf,
463 &chan->is_global); 465 &chan->is_global);
464 if (WARN_ON(dentry)) 466 if (IS_ERR_OR_NULL(dentry))
465 goto free_buf; 467 goto free_buf;
466 } 468 }
467 469
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a674c7db2f29..d8d76a65cfdd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -396,6 +396,18 @@ static bool set_nr_if_polling(struct task_struct *p)
396#endif 396#endif
397#endif 397#endif
398 398
399/**
400 * wake_q_add() - queue a wakeup for 'later' waking.
401 * @head: the wake_q_head to add @task to
402 * @task: the task to queue for 'later' wakeup
403 *
404 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
405 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
406 * instantly.
407 *
408 * This function must be used as-if it were wake_up_process(); IOW the task
409 * must be ready to be woken at this location.
410 */
399void wake_q_add(struct wake_q_head *head, struct task_struct *task) 411void wake_q_add(struct wake_q_head *head, struct task_struct *task)
400{ 412{
401 struct wake_q_node *node = &task->wake_q; 413 struct wake_q_node *node = &task->wake_q;
@@ -405,10 +417,11 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
405 * its already queued (either by us or someone else) and will get the 417 * its already queued (either by us or someone else) and will get the
406 * wakeup due to that. 418 * wakeup due to that.
407 * 419 *
408 * This cmpxchg() executes a full barrier, which pairs with the full 420 * In order to ensure that a pending wakeup will observe our pending
409 * barrier executed by the wakeup in wake_up_q(). 421 * state, even in the failed case, an explicit smp_mb() must be used.
410 */ 422 */
411 if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL)) 423 smp_mb__before_atomic();
424 if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
412 return; 425 return;
413 426
414 get_task_struct(task); 427 get_task_struct(task);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 50aa2aba69bd..310d0637fe4b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5980,6 +5980,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
5980 5980
5981#ifdef CONFIG_SCHED_SMT 5981#ifdef CONFIG_SCHED_SMT
5982DEFINE_STATIC_KEY_FALSE(sched_smt_present); 5982DEFINE_STATIC_KEY_FALSE(sched_smt_present);
5983EXPORT_SYMBOL_GPL(sched_smt_present);
5983 5984
5984static inline void set_idle_cores(int cpu, int val) 5985static inline void set_idle_cores(int cpu, int val)
5985{ 5986{
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index fe24de3fbc93..0e97ca9306ef 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -124,6 +124,7 @@
124 * sampling of the aggregate task states would be. 124 * sampling of the aggregate task states would be.
125 */ 125 */
126 126
127#include "../workqueue_internal.h"
127#include <linux/sched/loadavg.h> 128#include <linux/sched/loadavg.h>
128#include <linux/seq_file.h> 129#include <linux/seq_file.h>
129#include <linux/proc_fs.h> 130#include <linux/proc_fs.h>
@@ -321,7 +322,7 @@ static bool update_stats(struct psi_group *group)
321 expires = group->next_update; 322 expires = group->next_update;
322 if (now < expires) 323 if (now < expires)
323 goto out; 324 goto out;
324 if (now - expires > psi_period) 325 if (now - expires >= psi_period)
325 missed_periods = div_u64(now - expires, psi_period); 326 missed_periods = div_u64(now - expires, psi_period);
326 327
327 /* 328 /*
@@ -480,9 +481,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
480 groupc->tasks[t]++; 481 groupc->tasks[t]++;
481 482
482 write_seqcount_end(&groupc->seq); 483 write_seqcount_end(&groupc->seq);
483
484 if (!delayed_work_pending(&group->clock_work))
485 schedule_delayed_work(&group->clock_work, PSI_FREQ);
486} 484}
487 485
488static struct psi_group *iterate_groups(struct task_struct *task, void **iter) 486static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
@@ -513,6 +511,7 @@ void psi_task_change(struct task_struct *task, int clear, int set)
513{ 511{
514 int cpu = task_cpu(task); 512 int cpu = task_cpu(task);
515 struct psi_group *group; 513 struct psi_group *group;
514 bool wake_clock = true;
516 void *iter = NULL; 515 void *iter = NULL;
517 516
518 if (!task->pid) 517 if (!task->pid)
@@ -530,8 +529,22 @@ void psi_task_change(struct task_struct *task, int clear, int set)
530 task->psi_flags &= ~clear; 529 task->psi_flags &= ~clear;
531 task->psi_flags |= set; 530 task->psi_flags |= set;
532 531
533 while ((group = iterate_groups(task, &iter))) 532 /*
533 * Periodic aggregation shuts off if there is a period of no
534 * task changes, so we wake it back up if necessary. However,
535 * don't do this if the task change is the aggregation worker
536 * itself going to sleep, or we'll ping-pong forever.
537 */
538 if (unlikely((clear & TSK_RUNNING) &&
539 (task->flags & PF_WQ_WORKER) &&
540 wq_worker_last_func(task) == psi_update_work))
541 wake_clock = false;
542
543 while ((group = iterate_groups(task, &iter))) {
534 psi_group_change(group, cpu, clear, set); 544 psi_group_change(group, cpu, clear, set);
545 if (wake_clock && !delayed_work_pending(&group->clock_work))
546 schedule_delayed_work(&group->clock_work, PSI_FREQ);
547 }
535} 548}
536 549
537void psi_memstall_tick(struct task_struct *task, int cpu) 550void psi_memstall_tick(struct task_struct *task, int cpu)
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index d7f538847b84..e815781ed751 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -976,6 +976,9 @@ static int seccomp_notify_release(struct inode *inode, struct file *file)
976 struct seccomp_filter *filter = file->private_data; 976 struct seccomp_filter *filter = file->private_data;
977 struct seccomp_knotif *knotif; 977 struct seccomp_knotif *knotif;
978 978
979 if (!filter)
980 return 0;
981
979 mutex_lock(&filter->notify_lock); 982 mutex_lock(&filter->notify_lock);
980 983
981 /* 984 /*
@@ -1300,6 +1303,7 @@ out:
1300out_put_fd: 1303out_put_fd:
1301 if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { 1304 if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
1302 if (ret < 0) { 1305 if (ret < 0) {
1306 listener_f->private_data = NULL;
1303 fput(listener_f); 1307 fput(listener_f);
1304 put_unused_fd(listener); 1308 put_unused_fd(listener);
1305 } else { 1309 } else {
diff --git a/kernel/signal.c b/kernel/signal.c
index e1d7ad8e6ab1..57b7771e20d7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
688} 688}
689EXPORT_SYMBOL_GPL(dequeue_signal); 689EXPORT_SYMBOL_GPL(dequeue_signal);
690 690
691static int dequeue_synchronous_signal(kernel_siginfo_t *info)
692{
693 struct task_struct *tsk = current;
694 struct sigpending *pending = &tsk->pending;
695 struct sigqueue *q, *sync = NULL;
696
697 /*
698 * Might a synchronous signal be in the queue?
699 */
700 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
701 return 0;
702
703 /*
704 * Return the first synchronous signal in the queue.
705 */
706 list_for_each_entry(q, &pending->list, list) {
707 /* Synchronous signals have a postive si_code */
708 if ((q->info.si_code > SI_USER) &&
709 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
710 sync = q;
711 goto next;
712 }
713 }
714 return 0;
715next:
716 /*
717 * Check if there is another siginfo for the same signal.
718 */
719 list_for_each_entry_continue(q, &pending->list, list) {
720 if (q->info.si_signo == sync->info.si_signo)
721 goto still_pending;
722 }
723
724 sigdelset(&pending->signal, sync->info.si_signo);
725 recalc_sigpending();
726still_pending:
727 list_del_init(&sync->list);
728 copy_siginfo(info, &sync->info);
729 __sigqueue_free(sync);
730 return info->si_signo;
731}
732
691/* 733/*
692 * Tell a process that it has a new active signal.. 734 * Tell a process that it has a new active signal..
693 * 735 *
@@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
1057 1099
1058 result = TRACE_SIGNAL_DELIVERED; 1100 result = TRACE_SIGNAL_DELIVERED;
1059 /* 1101 /*
1060 * Skip useless siginfo allocation for SIGKILL SIGSTOP, 1102 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1061 * and kernel threads.
1062 */ 1103 */
1063 if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD)) 1104 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1064 goto out_set; 1105 goto out_set;
1065 1106
1066 /* 1107 /*
@@ -2394,6 +2435,14 @@ relock:
2394 goto relock; 2435 goto relock;
2395 } 2436 }
2396 2437
2438 /* Has this task already been marked for death? */
2439 if (signal_group_exit(signal)) {
2440 ksig->info.si_signo = signr = SIGKILL;
2441 sigdelset(&current->pending.signal, SIGKILL);
2442 recalc_sigpending();
2443 goto fatal;
2444 }
2445
2397 for (;;) { 2446 for (;;) {
2398 struct k_sigaction *ka; 2447 struct k_sigaction *ka;
2399 2448
@@ -2407,7 +2456,15 @@ relock:
2407 goto relock; 2456 goto relock;
2408 } 2457 }
2409 2458
2410 signr = dequeue_signal(current, &current->blocked, &ksig->info); 2459 /*
2460 * Signals generated by the execution of an instruction
2461 * need to be delivered before any other pending signals
2462 * so that the instruction pointer in the signal stack
2463 * frame points to the faulting instruction.
2464 */
2465 signr = dequeue_synchronous_signal(&ksig->info);
2466 if (!signr)
2467 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2411 2468
2412 if (!signr) 2469 if (!signr)
2413 break; /* will return 0 */ 2470 break; /* will return 0 */
@@ -2489,6 +2546,7 @@ relock:
2489 continue; 2546 continue;
2490 } 2547 }
2491 2548
2549 fatal:
2492 spin_unlock_irq(&sighand->siglock); 2550 spin_unlock_irq(&sighand->siglock);
2493 2551
2494 /* 2552 /*
diff --git a/kernel/smp.c b/kernel/smp.c
index 163c451af42e..f4cf1b0bb3b8 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -584,8 +584,6 @@ void __init smp_init(void)
584 num_nodes, (num_nodes > 1 ? "s" : ""), 584 num_nodes, (num_nodes > 1 ? "s" : ""),
585 num_cpus, (num_cpus > 1 ? "s" : "")); 585 num_cpus, (num_cpus > 1 ? "s" : ""));
586 586
587 /* Final decision about SMT support */
588 cpu_smt_check_topology();
589 /* Any cleanup work */ 587 /* Any cleanup work */
590 smp_cpus_done(setup_max_cpus); 588 smp_cpus_done(setup_max_cpus);
591} 589}
diff --git a/kernel/sys.c b/kernel/sys.c
index a48cbf1414b8..f7eb62eceb24 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1207,7 +1207,8 @@ DECLARE_RWSEM(uts_sem);
1207/* 1207/*
1208 * Work around broken programs that cannot handle "Linux 3.0". 1208 * Work around broken programs that cannot handle "Linux 3.0".
1209 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 1209 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1210 * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60. 1210 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1211 * 2.6.60.
1211 */ 1212 */
1212static int override_release(char __user *release, size_t len) 1213static int override_release(char __user *release, size_t len)
1213{ 1214{
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 8f0644af40be..80f955210861 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
685 * set up the signal and overrun bookkeeping. 685 * set up the signal and overrun bookkeeping.
686 */ 686 */
687 timer->it.cpu.incr = timespec64_to_ns(&new->it_interval); 687 timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
688 timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
688 689
689 /* 690 /*
690 * This acts as a modification timestamp for the timer, 691 * This acts as a modification timestamp for the timer,
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 8b068adb9da1..f1a86a0d881d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
1204 1204
1205int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 1205int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1206{ 1206{
1207 int err; 1207 return __bpf_probe_register(btp, prog);
1208
1209 mutex_lock(&bpf_event_mutex);
1210 err = __bpf_probe_register(btp, prog);
1211 mutex_unlock(&bpf_event_mutex);
1212 return err;
1213} 1208}
1214 1209
1215int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 1210int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1216{ 1211{
1217 int err; 1212 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1218
1219 mutex_lock(&bpf_event_mutex);
1220 err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1221 mutex_unlock(&bpf_event_mutex);
1222 return err;
1223} 1213}
1224 1214
1225int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 1215int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c521b7347482..c4238b441624 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
3384 const char tgid_space[] = " "; 3384 const char tgid_space[] = " ";
3385 const char space[] = " "; 3385 const char space[] = " ";
3386 3386
3387 print_event_info(buf, m);
3388
3387 seq_printf(m, "# %s _-----=> irqs-off\n", 3389 seq_printf(m, "# %s _-----=> irqs-off\n",
3388 tgid ? tgid_space : space); 3390 tgid ? tgid_space : space);
3389 seq_printf(m, "# %s / _----=> need-resched\n", 3391 seq_printf(m, "# %s / _----=> need-resched\n",
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5c19b8c41c7e..9eaf07f99212 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -607,11 +607,17 @@ static int trace_kprobe_create(int argc, const char *argv[])
607 char buf[MAX_EVENT_NAME_LEN]; 607 char buf[MAX_EVENT_NAME_LEN];
608 unsigned int flags = TPARG_FL_KERNEL; 608 unsigned int flags = TPARG_FL_KERNEL;
609 609
610 /* argc must be >= 1 */ 610 switch (argv[0][0]) {
611 if (argv[0][0] == 'r') { 611 case 'r':
612 is_return = true; 612 is_return = true;
613 flags |= TPARG_FL_RETURN; 613 flags |= TPARG_FL_RETURN;
614 } else if (argv[0][0] != 'p' || argc < 2) 614 break;
615 case 'p':
616 break;
617 default:
618 return -ECANCELED;
619 }
620 if (argc < 2)
615 return -ECANCELED; 621 return -ECANCELED;
616 622
617 event = strchr(&argv[0][1], ':'); 623 event = strchr(&argv[0][1], ':');
@@ -855,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
855static nokprobe_inline int 861static nokprobe_inline int
856fetch_store_strlen(unsigned long addr) 862fetch_store_strlen(unsigned long addr)
857{ 863{
858 mm_segment_t old_fs;
859 int ret, len = 0; 864 int ret, len = 0;
860 u8 c; 865 u8 c;
861 866
862 old_fs = get_fs();
863 set_fs(KERNEL_DS);
864 pagefault_disable();
865
866 do { 867 do {
867 ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); 868 ret = probe_mem_read(&c, (u8 *)addr + len, 1);
868 len++; 869 len++;
869 } while (c && ret == 0 && len < MAX_STRING_SIZE); 870 } while (c && ret == 0 && len < MAX_STRING_SIZE);
870 871
871 pagefault_enable();
872 set_fs(old_fs);
873
874 return (ret < 0) ? ret : len; 872 return (ret < 0) ? ret : len;
875} 873}
876 874
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
index 5c56afc17cf8..4737bb8c07a3 100644
--- a/kernel/trace/trace_probe_tmpl.h
+++ b/kernel/trace/trace_probe_tmpl.h
@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
180 if (unlikely(arg->dynamic)) 180 if (unlikely(arg->dynamic))
181 *dl = make_data_loc(maxlen, dyndata - base); 181 *dl = make_data_loc(maxlen, dyndata - base);
182 ret = process_fetch_insn(arg->code, regs, dl, base); 182 ret = process_fetch_insn(arg->code, regs, dl, base);
183 if (unlikely(ret < 0 && arg->dynamic)) 183 if (unlikely(ret < 0 && arg->dynamic)) {
184 *dl = make_data_loc(0, dyndata - base); 184 *dl = make_data_loc(0, dyndata - base);
185 else 185 } else {
186 dyndata += ret; 186 dyndata += ret;
187 maxlen -= ret;
188 }
187 } 189 }
188} 190}
189 191
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index e335576b9411..9bde07c06362 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -5,7 +5,7 @@
5 * Copyright (C) IBM Corporation, 2010-2012 5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> 6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */ 7 */
8#define pr_fmt(fmt) "trace_kprobe: " fmt 8#define pr_fmt(fmt) "trace_uprobe: " fmt
9 9
10#include <linux/ctype.h> 10#include <linux/ctype.h>
11#include <linux/module.h> 11#include <linux/module.h>
@@ -160,6 +160,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
160 if (ret >= 0) { 160 if (ret >= 0) {
161 if (ret == maxlen) 161 if (ret == maxlen)
162 dst[ret - 1] = '\0'; 162 dst[ret - 1] = '\0';
163 else
164 /*
165 * Include the terminating null byte. In this case it
166 * was copied by strncpy_from_user but not accounted
167 * for in ret.
168 */
169 ret++;
163 *(u32 *)dest = make_data_loc(ret, (void *)dst - base); 170 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
164 } 171 }
165 172
diff --git a/kernel/umh.c b/kernel/umh.c
index 0baa672e023c..d937cbad903a 100644
--- a/kernel/umh.c
+++ b/kernel/umh.c
@@ -37,6 +37,8 @@ static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
37static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; 37static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
38static DEFINE_SPINLOCK(umh_sysctl_lock); 38static DEFINE_SPINLOCK(umh_sysctl_lock);
39static DECLARE_RWSEM(umhelper_sem); 39static DECLARE_RWSEM(umhelper_sem);
40static LIST_HEAD(umh_list);
41static DEFINE_MUTEX(umh_list_lock);
40 42
41static void call_usermodehelper_freeinfo(struct subprocess_info *info) 43static void call_usermodehelper_freeinfo(struct subprocess_info *info)
42{ 44{
@@ -100,10 +102,12 @@ static int call_usermodehelper_exec_async(void *data)
100 commit_creds(new); 102 commit_creds(new);
101 103
102 sub_info->pid = task_pid_nr(current); 104 sub_info->pid = task_pid_nr(current);
103 if (sub_info->file) 105 if (sub_info->file) {
104 retval = do_execve_file(sub_info->file, 106 retval = do_execve_file(sub_info->file,
105 sub_info->argv, sub_info->envp); 107 sub_info->argv, sub_info->envp);
106 else 108 if (!retval)
109 current->flags |= PF_UMH;
110 } else
107 retval = do_execve(getname_kernel(sub_info->path), 111 retval = do_execve(getname_kernel(sub_info->path),
108 (const char __user *const __user *)sub_info->argv, 112 (const char __user *const __user *)sub_info->argv,
109 (const char __user *const __user *)sub_info->envp); 113 (const char __user *const __user *)sub_info->envp);
@@ -517,6 +521,11 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info)
517 goto out; 521 goto out;
518 522
519 err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); 523 err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
524 if (!err) {
525 mutex_lock(&umh_list_lock);
526 list_add(&info->list, &umh_list);
527 mutex_unlock(&umh_list_lock);
528 }
520out: 529out:
521 fput(file); 530 fput(file);
522 return err; 531 return err;
@@ -679,6 +688,26 @@ static int proc_cap_handler(struct ctl_table *table, int write,
679 return 0; 688 return 0;
680} 689}
681 690
691void __exit_umh(struct task_struct *tsk)
692{
693 struct umh_info *info;
694 pid_t pid = tsk->pid;
695
696 mutex_lock(&umh_list_lock);
697 list_for_each_entry(info, &umh_list, list) {
698 if (info->pid == pid) {
699 list_del(&info->list);
700 mutex_unlock(&umh_list_lock);
701 goto out;
702 }
703 }
704 mutex_unlock(&umh_list_lock);
705 return;
706out:
707 if (info->cleanup)
708 info->cleanup(info);
709}
710
682struct ctl_table usermodehelper_table[] = { 711struct ctl_table usermodehelper_table[] = {
683 { 712 {
684 .procname = "bset", 713 .procname = "bset",
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 392be4b252f6..fc5d23d752a5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -910,6 +910,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
910} 910}
911 911
912/** 912/**
913 * wq_worker_last_func - retrieve worker's last work function
914 *
915 * Determine the last function a worker executed. This is called from
916 * the scheduler to get a worker's last known identity.
917 *
918 * CONTEXT:
919 * spin_lock_irq(rq->lock)
920 *
921 * Return:
922 * The last work function %current executed as a worker, NULL if it
923 * hasn't executed any work yet.
924 */
925work_func_t wq_worker_last_func(struct task_struct *task)
926{
927 struct worker *worker = kthread_data(task);
928
929 return worker->last_func;
930}
931
932/**
913 * worker_set_flags - set worker flags and adjust nr_running accordingly 933 * worker_set_flags - set worker flags and adjust nr_running accordingly
914 * @worker: self 934 * @worker: self
915 * @flags: flags to set 935 * @flags: flags to set
@@ -2184,6 +2204,9 @@ __acquires(&pool->lock)
2184 if (unlikely(cpu_intensive)) 2204 if (unlikely(cpu_intensive))
2185 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2205 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2186 2206
2207 /* tag the worker for identification in schedule() */
2208 worker->last_func = worker->current_func;
2209
2187 /* we're done with it, release */ 2210 /* we're done with it, release */
2188 hash_del(&worker->hentry); 2211 hash_del(&worker->hentry);
2189 worker->current_work = NULL; 2212 worker->current_work = NULL;
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 66fbb5a9e633..cb68b03ca89a 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -53,6 +53,9 @@ struct worker {
53 53
54 /* used only by rescuers to point to the target workqueue */ 54 /* used only by rescuers to point to the target workqueue */
55 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ 55 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
56
57 /* used by the scheduler to determine a worker's last known identity */
58 work_func_t last_func;
56}; 59};
57 60
58/** 61/**
@@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void)
67 70
68/* 71/*
69 * Scheduler hooks for concurrency managed workqueue. Only to be used from 72 * Scheduler hooks for concurrency managed workqueue. Only to be used from
70 * sched/core.c and workqueue.c. 73 * sched/ and workqueue.c.
71 */ 74 */
72void wq_worker_waking_up(struct task_struct *task, int cpu); 75void wq_worker_waking_up(struct task_struct *task, int cpu);
73struct task_struct *wq_worker_sleeping(struct task_struct *task); 76struct task_struct *wq_worker_sleeping(struct task_struct *task);
77work_func_t wq_worker_last_func(struct task_struct *task);
74 78
75#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ 79#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c6659cb37033..59875eb278ea 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -768,9 +768,11 @@ all_leaves_cluster_together:
768 new_s0->index_key[i] = 768 new_s0->index_key[i] =
769 ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); 769 ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
770 770
771 blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); 771 if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
772 pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); 772 blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
773 new_s0->index_key[keylen - 1] &= ~blank; 773 pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
774 new_s0->index_key[keylen - 1] &= ~blank;
775 }
774 776
775 /* This now reduces to a node splitting exercise for which we'll need 777 /* This now reduces to a node splitting exercise for which we'll need
776 * to regenerate the disparity table. 778 * to regenerate the disparity table.
diff --git a/lib/crc32.c b/lib/crc32.c
index 45b1d67a1767..4a20455d1f61 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
206EXPORT_SYMBOL(crc32_le); 206EXPORT_SYMBOL(crc32_le);
207EXPORT_SYMBOL(__crc32c_le); 207EXPORT_SYMBOL(__crc32c_le);
208 208
209u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); 209u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
210u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); 210u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
211 211
212/* 212/*
213 * This multiplies the polynomials x and y modulo the given modulus. 213 * This multiplies the polynomials x and y modulo the given modulus.
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 14436f4ca6bd..30e0f9770f88 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
52 if (x <= ULONG_MAX) 52 if (x <= ULONG_MAX)
53 return int_sqrt((unsigned long) x); 53 return int_sqrt((unsigned long) x);
54 54
55 m = 1ULL << (fls64(x) & ~1ULL); 55 m = 1ULL << ((fls64(x) - 1) & ~1ULL);
56 while (m != 0) { 56 while (m != 0) {
57 b = y + m; 57 b = y + m;
58 y >>= 1; 58 y >>= 1;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 65c2d06250a6..5b382c1244ed 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -26,14 +26,10 @@
26static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) 26static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
27{ 27{
28 unsigned long mask, val; 28 unsigned long mask, val;
29 unsigned long __maybe_unused flags;
30 bool ret = false; 29 bool ret = false;
30 unsigned long flags;
31 31
32 /* Silence bogus lockdep warning */ 32 spin_lock_irqsave(&sb->map[index].swap_lock, flags);
33#if defined(CONFIG_LOCKDEP)
34 local_irq_save(flags);
35#endif
36 spin_lock(&sb->map[index].swap_lock);
37 33
38 if (!sb->map[index].cleared) 34 if (!sb->map[index].cleared)
39 goto out_unlock; 35 goto out_unlock;
@@ -54,10 +50,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
54 50
55 ret = true; 51 ret = true;
56out_unlock: 52out_unlock:
57 spin_unlock(&sb->map[index].swap_lock); 53 spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
58#if defined(CONFIG_LOCKDEP)
59 local_irq_restore(flags);
60#endif
61 return ret; 54 return ret;
62} 55}
63 56
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index d82d022111e0..9cf77628fc91 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
632 config->test_driver = NULL; 632 config->test_driver = NULL;
633 633
634 kfree_const(config->test_fs); 634 kfree_const(config->test_fs);
635 config->test_driver = NULL; 635 config->test_fs = NULL;
636} 636}
637 637
638static void kmod_config_free(struct kmod_test_device *test_dev) 638static void kmod_config_free(struct kmod_test_device *test_dev)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 6a8ac7626797..e52f8cafe227 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
541static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, 541static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
542 int cnt, bool slow) 542 int cnt, bool slow)
543{ 543{
544 struct rhltable rhlt; 544 struct rhltable *rhlt;
545 unsigned int i, ret; 545 unsigned int i, ret;
546 const char *key; 546 const char *key;
547 int err = 0; 547 int err = 0;
548 548
549 err = rhltable_init(&rhlt, &test_rht_params_dup); 549 rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
550 if (WARN_ON(err)) 550 if (WARN_ON(!rhlt))
551 return -EINVAL;
552
553 err = rhltable_init(rhlt, &test_rht_params_dup);
554 if (WARN_ON(err)) {
555 kfree(rhlt);
551 return err; 556 return err;
557 }
552 558
553 for (i = 0; i < cnt; i++) { 559 for (i = 0; i < cnt; i++) {
554 rhl_test_objects[i].value.tid = i; 560 rhl_test_objects[i].value.tid = i;
555 key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead); 561 key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
556 key += test_rht_params_dup.key_offset; 562 key += test_rht_params_dup.key_offset;
557 563
558 if (slow) { 564 if (slow) {
559 err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key, 565 err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
560 &rhl_test_objects[i].list_node.rhead)); 566 &rhl_test_objects[i].list_node.rhead));
561 if (err == -EAGAIN) 567 if (err == -EAGAIN)
562 err = 0; 568 err = 0;
563 } else 569 } else
564 err = rhltable_insert(&rhlt, 570 err = rhltable_insert(rhlt,
565 &rhl_test_objects[i].list_node, 571 &rhl_test_objects[i].list_node,
566 test_rht_params_dup); 572 test_rht_params_dup);
567 if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) 573 if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
568 goto skip_print; 574 goto skip_print;
569 } 575 }
570 576
571 ret = print_ht(&rhlt); 577 ret = print_ht(rhlt);
572 WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); 578 WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
573 579
574skip_print: 580skip_print:
575 rhltable_destroy(&rhlt); 581 rhltable_destroy(rhlt);
582 kfree(rhlt);
576 583
577 return 0; 584 return 0;
578} 585}
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 4676c0a1eeca..c596a957f764 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -199,7 +199,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
199 XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); 199 XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
200 xa_set_mark(xa, index + 1, XA_MARK_0); 200 xa_set_mark(xa, index + 1, XA_MARK_0);
201 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); 201 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
202 xa_set_mark(xa, index + 2, XA_MARK_1); 202 xa_set_mark(xa, index + 2, XA_MARK_2);
203 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); 203 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
204 xa_store_order(xa, index, order, xa_mk_index(index), 204 xa_store_order(xa, index, order, xa_mk_index(index),
205 GFP_KERNEL); 205 GFP_KERNEL);
@@ -209,8 +209,8 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
209 void *entry; 209 void *entry;
210 210
211 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); 211 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
212 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1)); 212 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
213 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2)); 213 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
214 214
215 /* We should see two elements in the array */ 215 /* We should see two elements in the array */
216 rcu_read_lock(); 216 rcu_read_lock();
@@ -357,7 +357,7 @@ static noinline void check_cmpxchg(struct xarray *xa)
357static noinline void check_reserve(struct xarray *xa) 357static noinline void check_reserve(struct xarray *xa)
358{ 358{
359 void *entry; 359 void *entry;
360 unsigned long index = 0; 360 unsigned long index;
361 361
362 /* An array with a reserved entry is not empty */ 362 /* An array with a reserved entry is not empty */
363 XA_BUG_ON(xa, !xa_empty(xa)); 363 XA_BUG_ON(xa, !xa_empty(xa));
@@ -382,10 +382,12 @@ static noinline void check_reserve(struct xarray *xa)
382 xa_erase_index(xa, 12345678); 382 xa_erase_index(xa, 12345678);
383 XA_BUG_ON(xa, !xa_empty(xa)); 383 XA_BUG_ON(xa, !xa_empty(xa));
384 384
385 /* And so does xa_insert */ 385 /* But xa_insert does not */
386 xa_reserve(xa, 12345678, GFP_KERNEL); 386 xa_reserve(xa, 12345678, GFP_KERNEL);
387 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0); 387 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
388 xa_erase_index(xa, 12345678); 388 -EEXIST);
389 XA_BUG_ON(xa, xa_empty(xa));
390 XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
389 XA_BUG_ON(xa, !xa_empty(xa)); 391 XA_BUG_ON(xa, !xa_empty(xa));
390 392
391 /* Can iterate through a reserved entry */ 393 /* Can iterate through a reserved entry */
@@ -393,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa)
393 xa_reserve(xa, 6, GFP_KERNEL); 395 xa_reserve(xa, 6, GFP_KERNEL);
394 xa_store_index(xa, 7, GFP_KERNEL); 396 xa_store_index(xa, 7, GFP_KERNEL);
395 397
396 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { 398 xa_for_each(xa, index, entry) {
397 XA_BUG_ON(xa, index != 5 && index != 7); 399 XA_BUG_ON(xa, index != 5 && index != 7);
398 } 400 }
399 xa_destroy(xa); 401 xa_destroy(xa);
@@ -812,17 +814,16 @@ static noinline void check_find_1(struct xarray *xa)
812static noinline void check_find_2(struct xarray *xa) 814static noinline void check_find_2(struct xarray *xa)
813{ 815{
814 void *entry; 816 void *entry;
815 unsigned long i, j, index = 0; 817 unsigned long i, j, index;
816 818
817 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { 819 xa_for_each(xa, index, entry) {
818 XA_BUG_ON(xa, true); 820 XA_BUG_ON(xa, true);
819 } 821 }
820 822
821 for (i = 0; i < 1024; i++) { 823 for (i = 0; i < 1024; i++) {
822 xa_store_index(xa, index, GFP_KERNEL); 824 xa_store_index(xa, index, GFP_KERNEL);
823 j = 0; 825 j = 0;
824 index = 0; 826 xa_for_each(xa, index, entry) {
825 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
826 XA_BUG_ON(xa, xa_mk_index(index) != entry); 827 XA_BUG_ON(xa, xa_mk_index(index) != entry);
827 XA_BUG_ON(xa, index != j++); 828 XA_BUG_ON(xa, index != j++);
828 } 829 }
@@ -839,6 +840,7 @@ static noinline void check_find_3(struct xarray *xa)
839 840
840 for (i = 0; i < 100; i++) { 841 for (i = 0; i < 100; i++) {
841 for (j = 0; j < 100; j++) { 842 for (j = 0; j < 100; j++) {
843 rcu_read_lock();
842 for (k = 0; k < 100; k++) { 844 for (k = 0; k < 100; k++) {
843 xas_set(&xas, j); 845 xas_set(&xas, j);
844 xas_for_each_marked(&xas, entry, k, XA_MARK_0) 846 xas_for_each_marked(&xas, entry, k, XA_MARK_0)
@@ -847,6 +849,7 @@ static noinline void check_find_3(struct xarray *xa)
847 XA_BUG_ON(xa, 849 XA_BUG_ON(xa,
848 xas.xa_node != XAS_RESTART); 850 xas.xa_node != XAS_RESTART);
849 } 851 }
852 rcu_read_unlock();
850 } 853 }
851 xa_store_index(xa, i, GFP_KERNEL); 854 xa_store_index(xa, i, GFP_KERNEL);
852 xa_set_mark(xa, i, XA_MARK_0); 855 xa_set_mark(xa, i, XA_MARK_0);
@@ -1183,6 +1186,35 @@ static noinline void check_store_range(struct xarray *xa)
1183 } 1186 }
1184} 1187}
1185 1188
1189static void check_align_1(struct xarray *xa, char *name)
1190{
1191 int i;
1192 unsigned int id;
1193 unsigned long index;
1194 void *entry;
1195
1196 for (i = 0; i < 8; i++) {
1197 id = 0;
1198 XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL)
1199 != 0);
1200 XA_BUG_ON(xa, id != i);
1201 }
1202 xa_for_each(xa, index, entry)
1203 XA_BUG_ON(xa, xa_is_err(entry));
1204 xa_destroy(xa);
1205}
1206
1207static noinline void check_align(struct xarray *xa)
1208{
1209 char name[] = "Motorola 68000";
1210
1211 check_align_1(xa, name);
1212 check_align_1(xa, name + 1);
1213 check_align_1(xa, name + 2);
1214 check_align_1(xa, name + 3);
1215// check_align_2(xa, name);
1216}
1217
1186static LIST_HEAD(shadow_nodes); 1218static LIST_HEAD(shadow_nodes);
1187 1219
1188static void test_update_node(struct xa_node *node) 1220static void test_update_node(struct xa_node *node)
@@ -1332,6 +1364,7 @@ static int xarray_checks(void)
1332 check_create_range(&array); 1364 check_create_range(&array);
1333 check_store_range(&array); 1365 check_store_range(&array);
1334 check_store_iter(&array); 1366 check_store_iter(&array);
1367 check_align(&xa0);
1335 1368
1336 check_workingset(&array, 0); 1369 check_workingset(&array, 0);
1337 check_workingset(&array, 64); 1370 check_workingset(&array, 64);
diff --git a/lib/xarray.c b/lib/xarray.c
index 5f3f9311de89..81c3171ddde9 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -232,6 +232,8 @@ void *xas_load(struct xa_state *xas)
232 if (xas->xa_shift > node->shift) 232 if (xas->xa_shift > node->shift)
233 break; 233 break;
234 entry = xas_descend(xas, node); 234 entry = xas_descend(xas, node);
235 if (node->shift == 0)
236 break;
235 } 237 }
236 return entry; 238 return entry;
237} 239}
@@ -506,7 +508,7 @@ static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
506 for (;;) { 508 for (;;) {
507 void *entry = xa_entry_locked(xas->xa, node, offset); 509 void *entry = xa_entry_locked(xas->xa, node, offset);
508 510
509 if (xa_is_node(entry)) { 511 if (node->shift && xa_is_node(entry)) {
510 node = xa_to_node(entry); 512 node = xa_to_node(entry);
511 offset = 0; 513 offset = 0;
512 continue; 514 continue;
@@ -604,6 +606,7 @@ static int xas_expand(struct xa_state *xas, void *head)
604/* 606/*
605 * xas_create() - Create a slot to store an entry in. 607 * xas_create() - Create a slot to store an entry in.
606 * @xas: XArray operation state. 608 * @xas: XArray operation state.
609 * @allow_root: %true if we can store the entry in the root directly
607 * 610 *
608 * Most users will not need to call this function directly, as it is called 611 * Most users will not need to call this function directly, as it is called
609 * by xas_store(). It is useful for doing conditional store operations 612 * by xas_store(). It is useful for doing conditional store operations
@@ -613,7 +616,7 @@ static int xas_expand(struct xa_state *xas, void *head)
613 * If the slot was newly created, returns %NULL. If it failed to create the 616 * If the slot was newly created, returns %NULL. If it failed to create the
614 * slot, returns %NULL and indicates the error in @xas. 617 * slot, returns %NULL and indicates the error in @xas.
615 */ 618 */
616static void *xas_create(struct xa_state *xas) 619static void *xas_create(struct xa_state *xas, bool allow_root)
617{ 620{
618 struct xarray *xa = xas->xa; 621 struct xarray *xa = xas->xa;
619 void *entry; 622 void *entry;
@@ -628,6 +631,8 @@ static void *xas_create(struct xa_state *xas)
628 shift = xas_expand(xas, entry); 631 shift = xas_expand(xas, entry);
629 if (shift < 0) 632 if (shift < 0)
630 return NULL; 633 return NULL;
634 if (!shift && !allow_root)
635 shift = XA_CHUNK_SHIFT;
631 entry = xa_head_locked(xa); 636 entry = xa_head_locked(xa);
632 slot = &xa->xa_head; 637 slot = &xa->xa_head;
633 } else if (xas_error(xas)) { 638 } else if (xas_error(xas)) {
@@ -687,7 +692,7 @@ void xas_create_range(struct xa_state *xas)
687 xas->xa_sibs = 0; 692 xas->xa_sibs = 0;
688 693
689 for (;;) { 694 for (;;) {
690 xas_create(xas); 695 xas_create(xas, true);
691 if (xas_error(xas)) 696 if (xas_error(xas))
692 goto restore; 697 goto restore;
693 if (xas->xa_index <= (index | XA_CHUNK_MASK)) 698 if (xas->xa_index <= (index | XA_CHUNK_MASK))
@@ -754,7 +759,7 @@ void *xas_store(struct xa_state *xas, void *entry)
754 bool value = xa_is_value(entry); 759 bool value = xa_is_value(entry);
755 760
756 if (entry) 761 if (entry)
757 first = xas_create(xas); 762 first = xas_create(xas, !xa_is_node(entry));
758 else 763 else
759 first = xas_load(xas); 764 first = xas_load(xas);
760 765
@@ -1251,35 +1256,6 @@ void *xas_find_conflict(struct xa_state *xas)
1251EXPORT_SYMBOL_GPL(xas_find_conflict); 1256EXPORT_SYMBOL_GPL(xas_find_conflict);
1252 1257
1253/** 1258/**
1254 * xa_init_flags() - Initialise an empty XArray with flags.
1255 * @xa: XArray.
1256 * @flags: XA_FLAG values.
1257 *
1258 * If you need to initialise an XArray with special flags (eg you need
1259 * to take the lock from interrupt context), use this function instead
1260 * of xa_init().
1261 *
1262 * Context: Any context.
1263 */
1264void xa_init_flags(struct xarray *xa, gfp_t flags)
1265{
1266 unsigned int lock_type;
1267 static struct lock_class_key xa_lock_irq;
1268 static struct lock_class_key xa_lock_bh;
1269
1270 spin_lock_init(&xa->xa_lock);
1271 xa->xa_flags = flags;
1272 xa->xa_head = NULL;
1273
1274 lock_type = xa_lock_type(xa);
1275 if (lock_type == XA_LOCK_IRQ)
1276 lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
1277 else if (lock_type == XA_LOCK_BH)
1278 lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
1279}
1280EXPORT_SYMBOL(xa_init_flags);
1281
1282/**
1283 * xa_load() - Load an entry from an XArray. 1259 * xa_load() - Load an entry from an XArray.
1284 * @xa: XArray. 1260 * @xa: XArray.
1285 * @index: index into array. 1261 * @index: index into array.
@@ -1308,7 +1284,6 @@ static void *xas_result(struct xa_state *xas, void *curr)
1308{ 1284{
1309 if (xa_is_zero(curr)) 1285 if (xa_is_zero(curr))
1310 return NULL; 1286 return NULL;
1311 XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
1312 if (xas_error(xas)) 1287 if (xas_error(xas))
1313 curr = xas->xa_node; 1288 curr = xas->xa_node;
1314 return curr; 1289 return curr;
@@ -1378,7 +1353,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1378 XA_STATE(xas, xa, index); 1353 XA_STATE(xas, xa, index);
1379 void *curr; 1354 void *curr;
1380 1355
1381 if (WARN_ON_ONCE(xa_is_internal(entry))) 1356 if (WARN_ON_ONCE(xa_is_advanced(entry)))
1382 return XA_ERROR(-EINVAL); 1357 return XA_ERROR(-EINVAL);
1383 if (xa_track_free(xa) && !entry) 1358 if (xa_track_free(xa) && !entry)
1384 entry = XA_ZERO_ENTRY; 1359 entry = XA_ZERO_ENTRY;
@@ -1444,7 +1419,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1444 XA_STATE(xas, xa, index); 1419 XA_STATE(xas, xa, index);
1445 void *curr; 1420 void *curr;
1446 1421
1447 if (WARN_ON_ONCE(xa_is_internal(entry))) 1422 if (WARN_ON_ONCE(xa_is_advanced(entry)))
1448 return XA_ERROR(-EINVAL); 1423 return XA_ERROR(-EINVAL);
1449 if (xa_track_free(xa) && !entry) 1424 if (xa_track_free(xa) && !entry)
1450 entry = XA_ZERO_ENTRY; 1425 entry = XA_ZERO_ENTRY;
@@ -1465,6 +1440,47 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1465EXPORT_SYMBOL(__xa_cmpxchg); 1440EXPORT_SYMBOL(__xa_cmpxchg);
1466 1441
1467/** 1442/**
1443 * __xa_insert() - Store this entry in the XArray if no entry is present.
1444 * @xa: XArray.
1445 * @index: Index into array.
1446 * @entry: New entry.
1447 * @gfp: Memory allocation flags.
1448 *
1449 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
1450 * if no entry is present. Inserting will fail if a reserved entry is
1451 * present, even though loading from this index will return NULL.
1452 *
1453 * Context: Any context. Expects xa_lock to be held on entry. May
1454 * release and reacquire xa_lock if @gfp flags permit.
1455 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
1456 * -ENOMEM if memory could not be allocated.
1457 */
1458int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1459{
1460 XA_STATE(xas, xa, index);
1461 void *curr;
1462
1463 if (WARN_ON_ONCE(xa_is_advanced(entry)))
1464 return -EINVAL;
1465 if (!entry)
1466 entry = XA_ZERO_ENTRY;
1467
1468 do {
1469 curr = xas_load(&xas);
1470 if (!curr) {
1471 xas_store(&xas, entry);
1472 if (xa_track_free(xa))
1473 xas_clear_mark(&xas, XA_FREE_MARK);
1474 } else {
1475 xas_set_err(&xas, -EEXIST);
1476 }
1477 } while (__xas_nomem(&xas, gfp));
1478
1479 return xas_error(&xas);
1480}
1481EXPORT_SYMBOL(__xa_insert);
1482
1483/**
1468 * __xa_reserve() - Reserve this index in the XArray. 1484 * __xa_reserve() - Reserve this index in the XArray.
1469 * @xa: XArray. 1485 * @xa: XArray.
1470 * @index: Index into array. 1486 * @index: Index into array.
@@ -1567,7 +1583,7 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
1567 if (last + 1) 1583 if (last + 1)
1568 order = __ffs(last + 1); 1584 order = __ffs(last + 1);
1569 xas_set_order(&xas, last, order); 1585 xas_set_order(&xas, last, order);
1570 xas_create(&xas); 1586 xas_create(&xas, true);
1571 if (xas_error(&xas)) 1587 if (xas_error(&xas))
1572 goto unlock; 1588 goto unlock;
1573 } 1589 }
@@ -1609,7 +1625,7 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
1609 XA_STATE(xas, xa, 0); 1625 XA_STATE(xas, xa, 0);
1610 int err; 1626 int err;
1611 1627
1612 if (WARN_ON_ONCE(xa_is_internal(entry))) 1628 if (WARN_ON_ONCE(xa_is_advanced(entry)))
1613 return -EINVAL; 1629 return -EINVAL;
1614 if (WARN_ON_ONCE(!xa_track_free(xa))) 1630 if (WARN_ON_ONCE(!xa_track_free(xa)))
1615 return -EINVAL; 1631 return -EINVAL;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8a8bb8796c6c..72e6d0c55cfa 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -689,6 +689,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
689 INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); 689 INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
690 bdi->cgwb_congested_tree = RB_ROOT; 690 bdi->cgwb_congested_tree = RB_ROOT;
691 mutex_init(&bdi->cgwb_release_mutex); 691 mutex_init(&bdi->cgwb_release_mutex);
692 init_rwsem(&bdi->wb_switch_rwsem);
692 693
693 ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); 694 ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
694 if (!ret) { 695 if (!ret) {
diff --git a/mm/debug.c b/mm/debug.c
index 0abb987dad9b..1611cf00a137 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = {
44 44
45void __dump_page(struct page *page, const char *reason) 45void __dump_page(struct page *page, const char *reason)
46{ 46{
47 struct address_space *mapping = page_mapping(page); 47 struct address_space *mapping;
48 bool page_poisoned = PagePoisoned(page); 48 bool page_poisoned = PagePoisoned(page);
49 int mapcount; 49 int mapcount;
50 50
@@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason)
58 goto hex_only; 58 goto hex_only;
59 } 59 }
60 60
61 mapping = page_mapping(page);
62
61 /* 63 /*
62 * Avoid VM_BUG_ON() in page_mapcount(). 64 * Avoid VM_BUG_ON() in page_mapcount().
63 * page->_mapcount space in struct page is used by sl[aou]b pages to 65 * page->_mapcount space in struct page is used by sl[aou]b pages to
diff --git a/mm/gup.c b/mm/gup.c
index 05acd7e2eb22..75029649baca 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1674,7 +1674,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
1674 if (!pmd_present(pmd)) 1674 if (!pmd_present(pmd))
1675 return 0; 1675 return 0;
1676 1676
1677 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { 1677 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
1678 pmd_devmap(pmd))) {
1678 /* 1679 /*
1679 * NUMA hinting faults need to be handled in the GUP 1680 * NUMA hinting faults need to be handled in the GUP
1680 * slowpath for accounting purposes and so that they 1681 * slowpath for accounting purposes and so that they
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 745088810965..afef61656c1e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3238,7 +3238,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3238 struct page *ptepage; 3238 struct page *ptepage;
3239 unsigned long addr; 3239 unsigned long addr;
3240 int cow; 3240 int cow;
3241 struct address_space *mapping = vma->vm_file->f_mapping;
3242 struct hstate *h = hstate_vma(vma); 3241 struct hstate *h = hstate_vma(vma);
3243 unsigned long sz = huge_page_size(h); 3242 unsigned long sz = huge_page_size(h);
3244 struct mmu_notifier_range range; 3243 struct mmu_notifier_range range;
@@ -3250,23 +3249,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3250 mmu_notifier_range_init(&range, src, vma->vm_start, 3249 mmu_notifier_range_init(&range, src, vma->vm_start,
3251 vma->vm_end); 3250 vma->vm_end);
3252 mmu_notifier_invalidate_range_start(&range); 3251 mmu_notifier_invalidate_range_start(&range);
3253 } else {
3254 /*
3255 * For shared mappings i_mmap_rwsem must be held to call
3256 * huge_pte_alloc, otherwise the returned ptep could go
3257 * away if part of a shared pmd and another thread calls
3258 * huge_pmd_unshare.
3259 */
3260 i_mmap_lock_read(mapping);
3261 } 3252 }
3262 3253
3263 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 3254 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3264 spinlock_t *src_ptl, *dst_ptl; 3255 spinlock_t *src_ptl, *dst_ptl;
3265
3266 src_pte = huge_pte_offset(src, addr, sz); 3256 src_pte = huge_pte_offset(src, addr, sz);
3267 if (!src_pte) 3257 if (!src_pte)
3268 continue; 3258 continue;
3269
3270 dst_pte = huge_pte_alloc(dst, addr, sz); 3259 dst_pte = huge_pte_alloc(dst, addr, sz);
3271 if (!dst_pte) { 3260 if (!dst_pte) {
3272 ret = -ENOMEM; 3261 ret = -ENOMEM;
@@ -3337,8 +3326,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3337 3326
3338 if (cow) 3327 if (cow)
3339 mmu_notifier_invalidate_range_end(&range); 3328 mmu_notifier_invalidate_range_end(&range);
3340 else
3341 i_mmap_unlock_read(mapping);
3342 3329
3343 return ret; 3330 return ret;
3344} 3331}
@@ -3755,16 +3742,16 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3755 } 3742 }
3756 3743
3757 /* 3744 /*
3758 * We can not race with truncation due to holding i_mmap_rwsem. 3745 * Use page lock to guard against racing truncation
3759 * Check once here for faults beyond end of file. 3746 * before we get page_table_lock.
3760 */ 3747 */
3761 size = i_size_read(mapping->host) >> huge_page_shift(h);
3762 if (idx >= size)
3763 goto out;
3764
3765retry: 3748retry:
3766 page = find_lock_page(mapping, idx); 3749 page = find_lock_page(mapping, idx);
3767 if (!page) { 3750 if (!page) {
3751 size = i_size_read(mapping->host) >> huge_page_shift(h);
3752 if (idx >= size)
3753 goto out;
3754
3768 /* 3755 /*
3769 * Check for page in userfault range 3756 * Check for page in userfault range
3770 */ 3757 */
@@ -3784,18 +3771,14 @@ retry:
3784 }; 3771 };
3785 3772
3786 /* 3773 /*
3787 * hugetlb_fault_mutex and i_mmap_rwsem must be 3774 * hugetlb_fault_mutex must be dropped before
3788 * dropped before handling userfault. Reacquire 3775 * handling userfault. Reacquire after handling
3789 * after handling fault to make calling code simpler. 3776 * fault to make calling code simpler.
3790 */ 3777 */
3791 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, 3778 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3792 idx, haddr); 3779 idx, haddr);
3793 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 3780 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3794 i_mmap_unlock_read(mapping);
3795
3796 ret = handle_userfault(&vmf, VM_UFFD_MISSING); 3781 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3797
3798 i_mmap_lock_read(mapping);
3799 mutex_lock(&hugetlb_fault_mutex_table[hash]); 3782 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3800 goto out; 3783 goto out;
3801 } 3784 }
@@ -3854,6 +3837,9 @@ retry:
3854 } 3837 }
3855 3838
3856 ptl = huge_pte_lock(h, mm, ptep); 3839 ptl = huge_pte_lock(h, mm, ptep);
3840 size = i_size_read(mapping->host) >> huge_page_shift(h);
3841 if (idx >= size)
3842 goto backout;
3857 3843
3858 ret = 0; 3844 ret = 0;
3859 if (!huge_pte_none(huge_ptep_get(ptep))) 3845 if (!huge_pte_none(huge_ptep_get(ptep)))
@@ -3940,11 +3926,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3940 3926
3941 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 3927 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3942 if (ptep) { 3928 if (ptep) {
3943 /*
3944 * Since we hold no locks, ptep could be stale. That is
3945 * OK as we are only making decisions based on content and
3946 * not actually modifying content here.
3947 */
3948 entry = huge_ptep_get(ptep); 3929 entry = huge_ptep_get(ptep);
3949 if (unlikely(is_hugetlb_entry_migration(entry))) { 3930 if (unlikely(is_hugetlb_entry_migration(entry))) {
3950 migration_entry_wait_huge(vma, mm, ptep); 3931 migration_entry_wait_huge(vma, mm, ptep);
@@ -3952,33 +3933,20 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3952 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 3933 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3953 return VM_FAULT_HWPOISON_LARGE | 3934 return VM_FAULT_HWPOISON_LARGE |
3954 VM_FAULT_SET_HINDEX(hstate_index(h)); 3935 VM_FAULT_SET_HINDEX(hstate_index(h));
3936 } else {
3937 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3938 if (!ptep)
3939 return VM_FAULT_OOM;
3955 } 3940 }
3956 3941
3957 /*
3958 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
3959 * until finished with ptep. This serves two purposes:
3960 * 1) It prevents huge_pmd_unshare from being called elsewhere
3961 * and making the ptep no longer valid.
3962 * 2) It synchronizes us with file truncation.
3963 *
3964 * ptep could have already be assigned via huge_pte_offset. That
3965 * is OK, as huge_pte_alloc will return the same value unless
3966 * something changed.
3967 */
3968 mapping = vma->vm_file->f_mapping; 3942 mapping = vma->vm_file->f_mapping;
3969 i_mmap_lock_read(mapping); 3943 idx = vma_hugecache_offset(h, vma, haddr);
3970 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3971 if (!ptep) {
3972 i_mmap_unlock_read(mapping);
3973 return VM_FAULT_OOM;
3974 }
3975 3944
3976 /* 3945 /*
3977 * Serialize hugepage allocation and instantiation, so that we don't 3946 * Serialize hugepage allocation and instantiation, so that we don't
3978 * get spurious allocation failures if two CPUs race to instantiate 3947 * get spurious allocation failures if two CPUs race to instantiate
3979 * the same page in the page cache. 3948 * the same page in the page cache.
3980 */ 3949 */
3981 idx = vma_hugecache_offset(h, vma, haddr);
3982 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); 3950 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
3983 mutex_lock(&hugetlb_fault_mutex_table[hash]); 3951 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3984 3952
@@ -4066,7 +4034,6 @@ out_ptl:
4066 } 4034 }
4067out_mutex: 4035out_mutex:
4068 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 4036 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4069 i_mmap_unlock_read(mapping);
4070 /* 4037 /*
4071 * Generally it's safe to hold refcount during waiting page lock. But 4038 * Generally it's safe to hold refcount during waiting page lock. But
4072 * here we just wait to defer the next page fault to avoid busy loop and 4039 * here we just wait to defer the next page fault to avoid busy loop and
@@ -4301,7 +4268,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4301 break; 4268 break;
4302 } 4269 }
4303 if (ret & VM_FAULT_RETRY) { 4270 if (ret & VM_FAULT_RETRY) {
4304 if (nonblocking) 4271 if (nonblocking &&
4272 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4305 *nonblocking = 0; 4273 *nonblocking = 0;
4306 *nr_pages = 0; 4274 *nr_pages = 0;
4307 /* 4275 /*
@@ -4671,12 +4639,10 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4671 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 4639 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4672 * and returns the corresponding pte. While this is not necessary for the 4640 * and returns the corresponding pte. While this is not necessary for the
4673 * !shared pmd case because we can allocate the pmd later as well, it makes the 4641 * !shared pmd case because we can allocate the pmd later as well, it makes the
4674 * code much cleaner. 4642 * code much cleaner. pmd allocation is essential for the shared case because
4675 * 4643 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4676 * This routine must be called with i_mmap_rwsem held in at least read mode. 4644 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4677 * For hugetlbfs, this prevents removal of any page table entries associated 4645 * bad pmd for sharing.
4678 * with the address space. This is important as we are setting up sharing
4679 * based on existing page table entries (mappings).
4680 */ 4646 */
4681pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 4647pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4682{ 4648{
@@ -4693,6 +4659,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4693 if (!vma_shareable(vma, addr)) 4659 if (!vma_shareable(vma, addr))
4694 return (pte_t *)pmd_alloc(mm, pud, addr); 4660 return (pte_t *)pmd_alloc(mm, pud, addr);
4695 4661
4662 i_mmap_lock_write(mapping);
4696 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 4663 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4697 if (svma == vma) 4664 if (svma == vma)
4698 continue; 4665 continue;
@@ -4722,6 +4689,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4722 spin_unlock(ptl); 4689 spin_unlock(ptl);
4723out: 4690out:
4724 pte = (pte_t *)pmd_alloc(mm, pud, addr); 4691 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4692 i_mmap_unlock_write(mapping);
4725 return pte; 4693 return pte;
4726} 4694}
4727 4695
@@ -4732,7 +4700,7 @@ out:
4732 * indicated by page_count > 1, unmap is achieved by clearing pud and 4700 * indicated by page_count > 1, unmap is achieved by clearing pud and
4733 * decrementing the ref count. If count == 1, the pte page is not shared. 4701 * decrementing the ref count. If count == 1, the pte page is not shared.
4734 * 4702 *
4735 * Called with page table lock held and i_mmap_rwsem held in write mode. 4703 * called with page table lock held.
4736 * 4704 *
4737 * returns: 1 successfully unmapped a shared pte page 4705 * returns: 1 successfully unmapped a shared pte page
4738 * 0 the underlying pte page is not shared, or it is the last user 4706 * 0 the underlying pte page is not shared, or it is the last user
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 0a14fcff70ed..5d1065efbd47 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -5,7 +5,10 @@ UBSAN_SANITIZE_generic.o := n
5UBSAN_SANITIZE_tags.o := n 5UBSAN_SANITIZE_tags.o := n
6KCOV_INSTRUMENT := n 6KCOV_INSTRUMENT := n
7 7
8CFLAGS_REMOVE_common.o = -pg
8CFLAGS_REMOVE_generic.o = -pg 9CFLAGS_REMOVE_generic.o = -pg
10CFLAGS_REMOVE_tags.o = -pg
11
9# Function splitter causes unnecessary splits in __asan_load1/__asan_store1 12# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
10# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 13# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
11 14
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 03d5d1374ca7..09b534fbba17 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -298,8 +298,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
298 return; 298 return;
299 } 299 }
300 300
301 cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
302
303 *flags |= SLAB_KASAN; 301 *flags |= SLAB_KASAN;
304} 302}
305 303
@@ -349,28 +347,48 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
349} 347}
350 348
351/* 349/*
352 * Since it's desirable to only call object contructors once during slab 350 * This function assigns a tag to an object considering the following:
353 * allocation, we preassign tags to all such objects. Also preassign tags for 351 * 1. A cache might have a constructor, which might save a pointer to a slab
354 * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports. 352 * object somewhere (e.g. in the object itself). We preassign a tag for
355 * For SLAB allocator we can't preassign tags randomly since the freelist is 353 * each object in caches with constructors during slab creation and reuse
356 * stored as an array of indexes instead of a linked list. Assign tags based 354 * the same tag each time a particular object is allocated.
357 * on objects indexes, so that objects that are next to each other get 355 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
358 * different tags. 356 * accessed after being freed. We preassign tags for objects in these
359 * After a tag is assigned, the object always gets allocated with the same tag. 357 * caches as well.
360 * The reason is that we can't change tags for objects with constructors on 358 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
361 * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor 359 * is stored as an array of indexes instead of a linked list. Assign tags
362 * code can save the pointer to the object somewhere (e.g. in the object 360 * based on objects indexes, so that objects that are next to each other
363 * itself). Then if we retag it, the old saved pointer will become invalid. 361 * get different tags.
364 */ 362 */
365static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new) 363static u8 assign_tag(struct kmem_cache *cache, const void *object,
364 bool init, bool keep_tag)
366{ 365{
366 /*
367 * 1. When an object is kmalloc()'ed, two hooks are called:
368 * kasan_slab_alloc() and kasan_kmalloc(). We assign the
369 * tag only in the first one.
370 * 2. We reuse the same tag for krealloc'ed objects.
371 */
372 if (keep_tag)
373 return get_tag(object);
374
375 /*
376 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
377 * set, assign a tag when the object is being allocated (init == false).
378 */
367 if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU)) 379 if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
368 return new ? KASAN_TAG_KERNEL : random_tag(); 380 return init ? KASAN_TAG_KERNEL : random_tag();
369 381
382 /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
370#ifdef CONFIG_SLAB 383#ifdef CONFIG_SLAB
384 /* For SLAB assign tags based on the object index in the freelist. */
371 return (u8)obj_to_index(cache, virt_to_page(object), (void *)object); 385 return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
372#else 386#else
373 return new ? random_tag() : get_tag(object); 387 /*
388 * For SLUB assign a random tag during slab creation, otherwise reuse
389 * the already assigned tag.
390 */
391 return init ? random_tag() : get_tag(object);
374#endif 392#endif
375} 393}
376 394
@@ -386,17 +404,12 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
386 __memset(alloc_info, 0, sizeof(*alloc_info)); 404 __memset(alloc_info, 0, sizeof(*alloc_info));
387 405
388 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 406 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
389 object = set_tag(object, assign_tag(cache, object, true)); 407 object = set_tag(object,
408 assign_tag(cache, object, true, false));
390 409
391 return (void *)object; 410 return (void *)object;
392} 411}
393 412
394void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
395 gfp_t flags)
396{
397 return kasan_kmalloc(cache, object, cache->object_size, flags);
398}
399
400static inline bool shadow_invalid(u8 tag, s8 shadow_byte) 413static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
401{ 414{
402 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 415 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
@@ -452,8 +465,8 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
452 return __kasan_slab_free(cache, object, ip, true); 465 return __kasan_slab_free(cache, object, ip, true);
453} 466}
454 467
455void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, 468static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
456 size_t size, gfp_t flags) 469 size_t size, gfp_t flags, bool keep_tag)
457{ 470{
458 unsigned long redzone_start; 471 unsigned long redzone_start;
459 unsigned long redzone_end; 472 unsigned long redzone_end;
@@ -471,7 +484,7 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
471 KASAN_SHADOW_SCALE_SIZE); 484 KASAN_SHADOW_SCALE_SIZE);
472 485
473 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 486 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
474 tag = assign_tag(cache, object, false); 487 tag = assign_tag(cache, object, false, keep_tag);
475 488
476 /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ 489 /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
477 kasan_unpoison_shadow(set_tag(object, tag), size); 490 kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -483,6 +496,18 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
483 496
484 return set_tag(object, tag); 497 return set_tag(object, tag);
485} 498}
499
500void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
501 gfp_t flags)
502{
503 return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
504}
505
506void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
507 size_t size, gfp_t flags)
508{
509 return __kasan_kmalloc(cache, object, size, flags, true);
510}
486EXPORT_SYMBOL(kasan_kmalloc); 511EXPORT_SYMBOL(kasan_kmalloc);
487 512
488void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, 513void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
@@ -522,7 +547,8 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
522 if (unlikely(!PageSlab(page))) 547 if (unlikely(!PageSlab(page)))
523 return kasan_kmalloc_large(object, size, flags); 548 return kasan_kmalloc_large(object, size, flags);
524 else 549 else
525 return kasan_kmalloc(page->slab_cache, object, size, flags); 550 return __kasan_kmalloc(page->slab_cache, object, size,
551 flags, true);
526} 552}
527 553
528void kasan_poison_kfree(void *ptr, unsigned long ip) 554void kasan_poison_kfree(void *ptr, unsigned long ip)
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 0777649e07c4..63fca3172659 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -46,7 +46,7 @@ void kasan_init_tags(void)
46 int cpu; 46 int cpu;
47 47
48 for_each_possible_cpu(cpu) 48 for_each_possible_cpu(cpu)
49 per_cpu(prng_state, cpu) = get_random_u32(); 49 per_cpu(prng_state, cpu) = (u32)get_cycles();
50} 50}
51 51
52/* 52/*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f9d9dc250428..707fa5579f66 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
574 unsigned long flags; 574 unsigned long flags;
575 struct kmemleak_object *object, *parent; 575 struct kmemleak_object *object, *parent;
576 struct rb_node **link, *rb_parent; 576 struct rb_node **link, *rb_parent;
577 unsigned long untagged_ptr;
577 578
578 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); 579 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
579 if (!object) { 580 if (!object) {
@@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
619 620
620 write_lock_irqsave(&kmemleak_lock, flags); 621 write_lock_irqsave(&kmemleak_lock, flags);
621 622
622 min_addr = min(min_addr, ptr); 623 untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
623 max_addr = max(max_addr, ptr + size); 624 min_addr = min(min_addr, untagged_ptr);
625 max_addr = max(max_addr, untagged_ptr + size);
624 link = &object_tree_root.rb_node; 626 link = &object_tree_root.rb_node;
625 rb_parent = NULL; 627 rb_parent = NULL;
626 while (*link) { 628 while (*link) {
@@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end,
1333 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); 1335 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1334 unsigned long *end = _end - (BYTES_PER_POINTER - 1); 1336 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1335 unsigned long flags; 1337 unsigned long flags;
1338 unsigned long untagged_ptr;
1336 1339
1337 read_lock_irqsave(&kmemleak_lock, flags); 1340 read_lock_irqsave(&kmemleak_lock, flags);
1338 for (ptr = start; ptr < end; ptr++) { 1341 for (ptr = start; ptr < end; ptr++) {
@@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end,
1347 pointer = *ptr; 1350 pointer = *ptr;
1348 kasan_enable_current(); 1351 kasan_enable_current();
1349 1352
1350 if (pointer < min_addr || pointer >= max_addr) 1353 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1354 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1351 continue; 1355 continue;
1352 1356
1353 /* 1357 /*
diff --git a/mm/memblock.c b/mm/memblock.c
index 022d4cbb3618..ea31045ba704 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -26,6 +26,13 @@
26 26
27#include "internal.h" 27#include "internal.h"
28 28
29#define INIT_MEMBLOCK_REGIONS 128
30#define INIT_PHYSMEM_REGIONS 4
31
32#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
33# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
34#endif
35
29/** 36/**
30 * DOC: memblock overview 37 * DOC: memblock overview
31 * 38 *
@@ -92,7 +99,7 @@ unsigned long max_pfn;
92unsigned long long max_possible_pfn; 99unsigned long long max_possible_pfn;
93 100
94static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 101static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
95static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 102static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
96#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 103#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
97static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; 104static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
98#endif 105#endif
@@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = {
105 112
106 .reserved.regions = memblock_reserved_init_regions, 113 .reserved.regions = memblock_reserved_init_regions,
107 .reserved.cnt = 1, /* empty dummy entry */ 114 .reserved.cnt = 1, /* empty dummy entry */
108 .reserved.max = INIT_MEMBLOCK_REGIONS, 115 .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
109 .reserved.name = "reserved", 116 .reserved.name = "reserved",
110 117
111#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 118#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 6379fff1a5ff..831be5ff5f4d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
372 if (fail || tk->addr_valid == 0) { 372 if (fail || tk->addr_valid == 0) {
373 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", 373 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
374 pfn, tk->tsk->comm, tk->tsk->pid); 374 pfn, tk->tsk->comm, tk->tsk->pid);
375 force_sig(SIGKILL, tk->tsk); 375 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
376 tk->tsk, PIDTYPE_PID);
376 } 377 }
377 378
378 /* 379 /*
@@ -966,7 +967,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
966 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; 967 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
967 struct address_space *mapping; 968 struct address_space *mapping;
968 LIST_HEAD(tokill); 969 LIST_HEAD(tokill);
969 bool unmap_success = true; 970 bool unmap_success;
970 int kill = 1, forcekill; 971 int kill = 1, forcekill;
971 struct page *hpage = *hpagep; 972 struct page *hpage = *hpagep;
972 bool mlocked = PageMlocked(hpage); 973 bool mlocked = PageMlocked(hpage);
@@ -1028,19 +1029,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1028 if (kill) 1029 if (kill)
1029 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); 1030 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
1030 1031
1031 if (!PageHuge(hpage)) { 1032 unmap_success = try_to_unmap(hpage, ttu);
1032 unmap_success = try_to_unmap(hpage, ttu);
1033 } else if (mapping) {
1034 /*
1035 * For hugetlb pages, try_to_unmap could potentially call
1036 * huge_pmd_unshare. Because of this, take semaphore in
1037 * write mode here and set TTU_RMAP_LOCKED to indicate we
1038 * have taken the lock at this higer level.
1039 */
1040 i_mmap_lock_write(mapping);
1041 unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
1042 i_mmap_unlock_write(mapping);
1043 }
1044 if (!unmap_success) 1033 if (!unmap_success)
1045 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n", 1034 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
1046 pfn, page_mapcount(hpage)); 1035 pfn, page_mapcount(hpage));
diff --git a/mm/memory.c b/mm/memory.c
index a52663c0612d..e11ca9dd823f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2994,6 +2994,28 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
2994 struct vm_area_struct *vma = vmf->vma; 2994 struct vm_area_struct *vma = vmf->vma;
2995 vm_fault_t ret; 2995 vm_fault_t ret;
2996 2996
2997 /*
2998 * Preallocate pte before we take page_lock because this might lead to
2999 * deadlocks for memcg reclaim which waits for pages under writeback:
3000 * lock_page(A)
3001 * SetPageWriteback(A)
3002 * unlock_page(A)
3003 * lock_page(B)
3004 * lock_page(B)
3005 * pte_alloc_pne
3006 * shrink_page_list
3007 * wait_on_page_writeback(A)
3008 * SetPageWriteback(B)
3009 * unlock_page(B)
3010 * # flush A, B to clear the writeback
3011 */
3012 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3013 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
3014 if (!vmf->prealloc_pte)
3015 return VM_FAULT_OOM;
3016 smp_wmb(); /* See comment in __pte_alloc() */
3017 }
3018
2997 ret = vma->vm_ops->fault(vmf); 3019 ret = vma->vm_ops->fault(vmf);
2998 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 3020 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
2999 VM_FAULT_DONE_COW))) 3021 VM_FAULT_DONE_COW)))
@@ -4077,8 +4099,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4077 goto out; 4099 goto out;
4078 4100
4079 if (range) { 4101 if (range) {
4080 range->start = address & PAGE_MASK; 4102 mmu_notifier_range_init(range, mm, address & PAGE_MASK,
4081 range->end = range->start + PAGE_SIZE; 4103 (address & PAGE_MASK) + PAGE_SIZE);
4082 mmu_notifier_invalidate_range_start(range); 4104 mmu_notifier_invalidate_range_start(range);
4083 } 4105 }
4084 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 4106 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b9a667d36c55..1ad28323fb9f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page)
1188 return PageBuddy(page) && page_order(page) >= pageblock_order; 1188 return PageBuddy(page) && page_order(page) >= pageblock_order;
1189} 1189}
1190 1190
1191/* Return the start of the next active pageblock after a given page */ 1191/* Return the pfn of the start of the next active pageblock after a given pfn */
1192static struct page *next_active_pageblock(struct page *page) 1192static unsigned long next_active_pageblock(unsigned long pfn)
1193{ 1193{
1194 struct page *page = pfn_to_page(pfn);
1195
1194 /* Ensure the starting page is pageblock-aligned */ 1196 /* Ensure the starting page is pageblock-aligned */
1195 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); 1197 BUG_ON(pfn & (pageblock_nr_pages - 1));
1196 1198
1197 /* If the entire pageblock is free, move to the end of free page */ 1199 /* If the entire pageblock is free, move to the end of free page */
1198 if (pageblock_free(page)) { 1200 if (pageblock_free(page)) {
@@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page)
1200 /* be careful. we don't have locks, page_order can be changed.*/ 1202 /* be careful. we don't have locks, page_order can be changed.*/
1201 order = page_order(page); 1203 order = page_order(page);
1202 if ((order < MAX_ORDER) && (order >= pageblock_order)) 1204 if ((order < MAX_ORDER) && (order >= pageblock_order))
1203 return page + (1 << order); 1205 return pfn + (1 << order);
1204 } 1206 }
1205 1207
1206 return page + pageblock_nr_pages; 1208 return pfn + pageblock_nr_pages;
1207} 1209}
1208 1210
1209static bool is_pageblock_removable_nolock(struct page *page) 1211static bool is_pageblock_removable_nolock(unsigned long pfn)
1210{ 1212{
1213 struct page *page = pfn_to_page(pfn);
1211 struct zone *zone; 1214 struct zone *zone;
1212 unsigned long pfn;
1213 1215
1214 /* 1216 /*
1215 * We have to be careful here because we are iterating over memory 1217 * We have to be careful here because we are iterating over memory
@@ -1232,12 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
1232/* Checks if this range of memory is likely to be hot-removable. */ 1234/* Checks if this range of memory is likely to be hot-removable. */
1233bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) 1235bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1234{ 1236{
1235 struct page *page = pfn_to_page(start_pfn); 1237 unsigned long end_pfn, pfn;
1236 struct page *end_page = page + nr_pages; 1238
1239 end_pfn = min(start_pfn + nr_pages,
1240 zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
1237 1241
1238 /* Check the starting page of each pageblock within the range */ 1242 /* Check the starting page of each pageblock within the range */
1239 for (; page < end_page; page = next_active_pageblock(page)) { 1243 for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
1240 if (!is_pageblock_removable_nolock(page)) 1244 if (!is_pageblock_removable_nolock(pfn))
1241 return false; 1245 return false;
1242 cond_resched(); 1246 cond_resched();
1243 } 1247 }
@@ -1273,6 +1277,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1273 i++; 1277 i++;
1274 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) 1278 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
1275 continue; 1279 continue;
1280 /* Check if we got outside of the zone */
1281 if (zone && !zone_spans_pfn(zone, pfn + i))
1282 return 0;
1276 page = pfn_to_page(pfn + i); 1283 page = pfn_to_page(pfn + i);
1277 if (zone && page_zone(page) != zone) 1284 if (zone && page_zone(page) != zone)
1278 return 0; 1285 return 0;
@@ -1301,23 +1308,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1301static unsigned long scan_movable_pages(unsigned long start, unsigned long end) 1308static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
1302{ 1309{
1303 unsigned long pfn; 1310 unsigned long pfn;
1304 struct page *page; 1311
1305 for (pfn = start; pfn < end; pfn++) { 1312 for (pfn = start; pfn < end; pfn++) {
1306 if (pfn_valid(pfn)) { 1313 struct page *page, *head;
1307 page = pfn_to_page(pfn); 1314 unsigned long skip;
1308 if (PageLRU(page)) 1315
1309 return pfn; 1316 if (!pfn_valid(pfn))
1310 if (__PageMovable(page)) 1317 continue;
1311 return pfn; 1318 page = pfn_to_page(pfn);
1312 if (PageHuge(page)) { 1319 if (PageLRU(page))
1313 if (hugepage_migration_supported(page_hstate(page)) && 1320 return pfn;
1314 page_huge_active(page)) 1321 if (__PageMovable(page))
1315 return pfn; 1322 return pfn;
1316 else 1323
1317 pfn = round_up(pfn + 1, 1324 if (!PageHuge(page))
1318 1 << compound_order(page)) - 1; 1325 continue;
1319 } 1326 head = compound_head(page);
1320 } 1327 if (hugepage_migration_supported(page_hstate(head)) &&
1328 page_huge_active(head))
1329 return pfn;
1330 skip = (1 << compound_order(head)) - (page - head);
1331 pfn += skip - 1;
1321 } 1332 }
1322 return 0; 1333 return 0;
1323} 1334}
@@ -1344,7 +1355,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1344{ 1355{
1345 unsigned long pfn; 1356 unsigned long pfn;
1346 struct page *page; 1357 struct page *page;
1347 int not_managed = 0;
1348 int ret = 0; 1358 int ret = 0;
1349 LIST_HEAD(source); 1359 LIST_HEAD(source);
1350 1360
@@ -1392,7 +1402,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1392 else 1402 else
1393 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); 1403 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1394 if (!ret) { /* Success */ 1404 if (!ret) { /* Success */
1395 put_page(page);
1396 list_add_tail(&page->lru, &source); 1405 list_add_tail(&page->lru, &source);
1397 if (!__PageMovable(page)) 1406 if (!__PageMovable(page))
1398 inc_node_page_state(page, NR_ISOLATED_ANON + 1407 inc_node_page_state(page, NR_ISOLATED_ANON +
@@ -1401,22 +1410,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1401 } else { 1410 } else {
1402 pr_warn("failed to isolate pfn %lx\n", pfn); 1411 pr_warn("failed to isolate pfn %lx\n", pfn);
1403 dump_page(page, "isolation failed"); 1412 dump_page(page, "isolation failed");
1404 put_page(page);
1405 /* Because we don't have big zone->lock. we should
1406 check this again here. */
1407 if (page_count(page)) {
1408 not_managed++;
1409 ret = -EBUSY;
1410 break;
1411 }
1412 } 1413 }
1414 put_page(page);
1413 } 1415 }
1414 if (!list_empty(&source)) { 1416 if (!list_empty(&source)) {
1415 if (not_managed) {
1416 putback_movable_pages(&source);
1417 goto out;
1418 }
1419
1420 /* Allocate a new page from the nearest neighbor node */ 1417 /* Allocate a new page from the nearest neighbor node */
1421 ret = migrate_pages(&source, new_node_page, NULL, 0, 1418 ret = migrate_pages(&source, new_node_page, NULL, 0,
1422 MIGRATE_SYNC, MR_MEMORY_HOTPLUG); 1419 MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
@@ -1429,7 +1426,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1429 putback_movable_pages(&source); 1426 putback_movable_pages(&source);
1430 } 1427 }
1431 } 1428 }
1432out: 1429
1433 return ret; 1430 return ret;
1434} 1431}
1435 1432
@@ -1576,7 +1573,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
1576 we assume this for now. .*/ 1573 we assume this for now. .*/
1577 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, 1574 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
1578 &valid_end)) { 1575 &valid_end)) {
1579 mem_hotplug_done();
1580 ret = -EINVAL; 1576 ret = -EINVAL;
1581 reason = "multizone range"; 1577 reason = "multizone range";
1582 goto failed_removal; 1578 goto failed_removal;
@@ -1591,7 +1587,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
1591 MIGRATE_MOVABLE, 1587 MIGRATE_MOVABLE,
1592 SKIP_HWPOISON | REPORT_FAILURE); 1588 SKIP_HWPOISON | REPORT_FAILURE);
1593 if (ret) { 1589 if (ret) {
1594 mem_hotplug_done();
1595 reason = "failure to isolate range"; 1590 reason = "failure to isolate range";
1596 goto failed_removal; 1591 goto failed_removal;
1597 } 1592 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d4496d9d34f5..ee2bce59d2bf 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1314 nodemask_t *nodes) 1314 nodemask_t *nodes)
1315{ 1315{
1316 unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1316 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1317 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 1317 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1318 1318
1319 if (copy > nbytes) { 1319 if (copy > nbytes) {
1320 if (copy > PAGE_SIZE) 1320 if (copy > PAGE_SIZE)
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
1491 int uninitialized_var(pval); 1491 int uninitialized_var(pval);
1492 nodemask_t nodes; 1492 nodemask_t nodes;
1493 1493
1494 if (nmask != NULL && maxnode < MAX_NUMNODES) 1494 if (nmask != NULL && maxnode < nr_node_ids)
1495 return -EINVAL; 1495 return -EINVAL;
1496 1496
1497 err = do_get_mempolicy(&pval, &nodes, addr, flags); 1497 err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1527 unsigned long nr_bits, alloc_size; 1527 unsigned long nr_bits, alloc_size;
1528 DECLARE_BITMAP(bm, MAX_NUMNODES); 1528 DECLARE_BITMAP(bm, MAX_NUMNODES);
1529 1529
1530 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 1530 nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1531 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1531 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1532 1532
1533 if (nmask) 1533 if (nmask)
diff --git a/mm/migrate.c b/mm/migrate.c
index ccf8966caf6f..d4fd680be3b0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -709,7 +709,6 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
709 /* Simple case, sync compaction */ 709 /* Simple case, sync compaction */
710 if (mode != MIGRATE_ASYNC) { 710 if (mode != MIGRATE_ASYNC) {
711 do { 711 do {
712 get_bh(bh);
713 lock_buffer(bh); 712 lock_buffer(bh);
714 bh = bh->b_this_page; 713 bh = bh->b_this_page;
715 714
@@ -720,18 +719,15 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
720 719
721 /* async case, we cannot block on lock_buffer so use trylock_buffer */ 720 /* async case, we cannot block on lock_buffer so use trylock_buffer */
722 do { 721 do {
723 get_bh(bh);
724 if (!trylock_buffer(bh)) { 722 if (!trylock_buffer(bh)) {
725 /* 723 /*
726 * We failed to lock the buffer and cannot stall in 724 * We failed to lock the buffer and cannot stall in
727 * async migration. Release the taken locks 725 * async migration. Release the taken locks
728 */ 726 */
729 struct buffer_head *failed_bh = bh; 727 struct buffer_head *failed_bh = bh;
730 put_bh(failed_bh);
731 bh = head; 728 bh = head;
732 while (bh != failed_bh) { 729 while (bh != failed_bh) {
733 unlock_buffer(bh); 730 unlock_buffer(bh);
734 put_bh(bh);
735 bh = bh->b_this_page; 731 bh = bh->b_this_page;
736 } 732 }
737 return false; 733 return false;
@@ -818,7 +814,6 @@ unlock_buffers:
818 bh = head; 814 bh = head;
819 do { 815 do {
820 unlock_buffer(bh); 816 unlock_buffer(bh);
821 put_bh(bh);
822 bh = bh->b_this_page; 817 bh = bh->b_this_page;
823 818
824 } while (bh != head); 819 } while (bh != head);
@@ -1135,10 +1130,13 @@ out:
1135 * If migration is successful, decrease refcount of the newpage 1130 * If migration is successful, decrease refcount of the newpage
1136 * which will not free the page because new page owner increased 1131 * which will not free the page because new page owner increased
1137 * refcounter. As well, if it is LRU page, add the page to LRU 1132 * refcounter. As well, if it is LRU page, add the page to LRU
1138 * list in here. 1133 * list in here. Use the old state of the isolated source page to
1134 * determine if we migrated a LRU page. newpage was already unlocked
1135 * and possibly modified by its owner - don't rely on the page
1136 * state.
1139 */ 1137 */
1140 if (rc == MIGRATEPAGE_SUCCESS) { 1138 if (rc == MIGRATEPAGE_SUCCESS) {
1141 if (unlikely(__PageMovable(newpage))) 1139 if (unlikely(!is_lru))
1142 put_page(newpage); 1140 put_page(newpage);
1143 else 1141 else
1144 putback_lru_page(newpage); 1142 putback_lru_page(newpage);
@@ -1324,19 +1322,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
1324 goto put_anon; 1322 goto put_anon;
1325 1323
1326 if (page_mapped(hpage)) { 1324 if (page_mapped(hpage)) {
1327 struct address_space *mapping = page_mapping(hpage);
1328
1329 /*
1330 * try_to_unmap could potentially call huge_pmd_unshare.
1331 * Because of this, take semaphore in write mode here and
1332 * set TTU_RMAP_LOCKED to let lower levels know we have
1333 * taken the lock.
1334 */
1335 i_mmap_lock_write(mapping);
1336 try_to_unmap(hpage, 1325 try_to_unmap(hpage,
1337 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| 1326 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1338 TTU_RMAP_LOCKED);
1339 i_mmap_unlock_write(mapping);
1340 page_was_mapped = 1; 1327 page_was_mapped = 1;
1341 } 1328 }
1342 1329
diff --git a/mm/mincore.c b/mm/mincore.c
index f0f91461a9f4..218099b5ed31 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -42,14 +42,72 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
42 return 0; 42 return 0;
43} 43}
44 44
45static int mincore_unmapped_range(unsigned long addr, unsigned long end, 45/*
46 struct mm_walk *walk) 46 * Later we can get more picky about what "in core" means precisely.
47 * For now, simply check to see if the page is in the page cache,
48 * and is up to date; i.e. that no page-in operation would be required
49 * at this time if an application were to map and access this page.
50 */
51static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
52{
53 unsigned char present = 0;
54 struct page *page;
55
56 /*
57 * When tmpfs swaps out a page from a file, any process mapping that
58 * file will not get a swp_entry_t in its pte, but rather it is like
59 * any other file mapping (ie. marked !present and faulted in with
60 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
61 */
62#ifdef CONFIG_SWAP
63 if (shmem_mapping(mapping)) {
64 page = find_get_entry(mapping, pgoff);
65 /*
66 * shmem/tmpfs may return swap: account for swapcache
67 * page too.
68 */
69 if (xa_is_value(page)) {
70 swp_entry_t swp = radix_to_swp_entry(page);
71 page = find_get_page(swap_address_space(swp),
72 swp_offset(swp));
73 }
74 } else
75 page = find_get_page(mapping, pgoff);
76#else
77 page = find_get_page(mapping, pgoff);
78#endif
79 if (page) {
80 present = PageUptodate(page);
81 put_page(page);
82 }
83
84 return present;
85}
86
87static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
88 struct vm_area_struct *vma, unsigned char *vec)
47{ 89{
48 unsigned char *vec = walk->private;
49 unsigned long nr = (end - addr) >> PAGE_SHIFT; 90 unsigned long nr = (end - addr) >> PAGE_SHIFT;
91 int i;
50 92
51 memset(vec, 0, nr); 93 if (vma->vm_file) {
52 walk->private += nr; 94 pgoff_t pgoff;
95
96 pgoff = linear_page_index(vma, addr);
97 for (i = 0; i < nr; i++, pgoff++)
98 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
99 } else {
100 for (i = 0; i < nr; i++)
101 vec[i] = 0;
102 }
103 return nr;
104}
105
106static int mincore_unmapped_range(unsigned long addr, unsigned long end,
107 struct mm_walk *walk)
108{
109 walk->private += __mincore_unmapped_range(addr, end,
110 walk->vma, walk->private);
53 return 0; 111 return 0;
54} 112}
55 113
@@ -69,9 +127,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
69 goto out; 127 goto out;
70 } 128 }
71 129
72 /* We'll consider a THP page under construction to be there */
73 if (pmd_trans_unstable(pmd)) { 130 if (pmd_trans_unstable(pmd)) {
74 memset(vec, 1, nr); 131 __mincore_unmapped_range(addr, end, vma, vec);
75 goto out; 132 goto out;
76 } 133 }
77 134
@@ -80,17 +137,28 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
80 pte_t pte = *ptep; 137 pte_t pte = *ptep;
81 138
82 if (pte_none(pte)) 139 if (pte_none(pte))
83 *vec = 0; 140 __mincore_unmapped_range(addr, addr + PAGE_SIZE,
141 vma, vec);
84 else if (pte_present(pte)) 142 else if (pte_present(pte))
85 *vec = 1; 143 *vec = 1;
86 else { /* pte is a swap entry */ 144 else { /* pte is a swap entry */
87 swp_entry_t entry = pte_to_swp_entry(pte); 145 swp_entry_t entry = pte_to_swp_entry(pte);
88 146
89 /* 147 if (non_swap_entry(entry)) {
90 * migration or hwpoison entries are always 148 /*
91 * uptodate 149 * migration or hwpoison entries are always
92 */ 150 * uptodate
93 *vec = !!non_swap_entry(entry); 151 */
152 *vec = 1;
153 } else {
154#ifdef CONFIG_SWAP
155 *vec = mincore_page(swap_address_space(entry),
156 swp_offset(entry));
157#else
158 WARN_ON(1);
159 *vec = 1;
160#endif
161 }
94 } 162 }
95 vec++; 163 vec++;
96 } 164 }
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f0e8cd9edb1a..26ea8636758f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -647,8 +647,8 @@ static int oom_reaper(void *unused)
647 647
648static void wake_oom_reaper(struct task_struct *tsk) 648static void wake_oom_reaper(struct task_struct *tsk)
649{ 649{
650 /* tsk is already queued? */ 650 /* mm is already queued? */
651 if (tsk == oom_reaper_list || tsk->oom_reaper_list) 651 if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
652 return; 652 return;
653 653
654 get_task_struct(tsk); 654 get_task_struct(tsk);
@@ -975,6 +975,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
975 * still freeing memory. 975 * still freeing memory.
976 */ 976 */
977 read_lock(&tasklist_lock); 977 read_lock(&tasklist_lock);
978
979 /*
980 * The task 'p' might have already exited before reaching here. The
981 * put_task_struct() will free task_struct 'p' while the loop still try
982 * to access the field of 'p', so, get an extra reference.
983 */
984 get_task_struct(p);
978 for_each_thread(p, t) { 985 for_each_thread(p, t) {
979 list_for_each_entry(child, &t->children, sibling) { 986 list_for_each_entry(child, &t->children, sibling) {
980 unsigned int child_points; 987 unsigned int child_points;
@@ -994,6 +1001,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
994 } 1001 }
995 } 1002 }
996 } 1003 }
1004 put_task_struct(p);
997 read_unlock(&tasklist_lock); 1005 read_unlock(&tasklist_lock);
998 1006
999 /* 1007 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cde5dac6229a..0b9f577b1a2a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone)
2170 2170
2171 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2171 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2172 watermark_boost_factor, 10000); 2172 watermark_boost_factor, 10000);
2173
2174 /*
2175 * high watermark may be uninitialised if fragmentation occurs
2176 * very early in boot so do not boost. We do not fall
2177 * through and boost by pageblock_nr_pages as failing
2178 * allocations that early means that reclaim is not going
2179 * to help and it may even be impossible to reclaim the
2180 * boosted watermark resulting in a hang.
2181 */
2182 if (!max_boost)
2183 return;
2184
2173 max_boost = max(pageblock_nr_pages, max_boost); 2185 max_boost = max(pageblock_nr_pages, max_boost);
2174 2186
2175 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2187 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
@@ -2214,7 +2226,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
2214 */ 2226 */
2215 boost_watermark(zone); 2227 boost_watermark(zone);
2216 if (alloc_flags & ALLOC_KSWAPD) 2228 if (alloc_flags & ALLOC_KSWAPD)
2217 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 2229 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2218 2230
2219 /* We are not allowed to try stealing from the whole block */ 2231 /* We are not allowed to try stealing from the whole block */
2220 if (!whole_block) 2232 if (!whole_block)
@@ -3102,6 +3114,12 @@ struct page *rmqueue(struct zone *preferred_zone,
3102 local_irq_restore(flags); 3114 local_irq_restore(flags);
3103 3115
3104out: 3116out:
3117 /* Separate test+clear to avoid unnecessary atomics */
3118 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3119 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3120 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3121 }
3122
3105 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3123 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3106 return page; 3124 return page;
3107 3125
@@ -4669,11 +4687,11 @@ refill:
4669 /* Even if we own the page, we do not use atomic_set(). 4687 /* Even if we own the page, we do not use atomic_set().
4670 * This would break get_page_unless_zero() users. 4688 * This would break get_page_unless_zero() users.
4671 */ 4689 */
4672 page_ref_add(page, size - 1); 4690 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4673 4691
4674 /* reset page count bias and offset to start of new frag */ 4692 /* reset page count bias and offset to start of new frag */
4675 nc->pfmemalloc = page_is_pfmemalloc(page); 4693 nc->pfmemalloc = page_is_pfmemalloc(page);
4676 nc->pagecnt_bias = size; 4694 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4677 nc->offset = size; 4695 nc->offset = size;
4678 } 4696 }
4679 4697
@@ -4689,10 +4707,10 @@ refill:
4689 size = nc->size; 4707 size = nc->size;
4690#endif 4708#endif
4691 /* OK, page count is 0, we can safely set it */ 4709 /* OK, page count is 0, we can safely set it */
4692 set_page_count(page, size); 4710 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4693 4711
4694 /* reset page count bias and offset to start of new frag */ 4712 /* reset page count bias and offset to start of new frag */
4695 nc->pagecnt_bias = size; 4713 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4696 offset = size - fragsz; 4714 offset = size - fragsz;
4697 } 4715 }
4698 4716
@@ -5695,18 +5713,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5695 cond_resched(); 5713 cond_resched();
5696 } 5714 }
5697 } 5715 }
5698#ifdef CONFIG_SPARSEMEM
5699 /*
5700 * If the zone does not span the rest of the section then
5701 * we should at least initialize those pages. Otherwise we
5702 * could blow up on a poisoned page in some paths which depend
5703 * on full sections being initialized (e.g. memory hotplug).
5704 */
5705 while (end_pfn % PAGES_PER_SECTION) {
5706 __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
5707 end_pfn++;
5708 }
5709#endif
5710} 5716}
5711 5717
5712#ifdef CONFIG_ZONE_DEVICE 5718#ifdef CONFIG_ZONE_DEVICE
diff --git a/mm/page_ext.c b/mm/page_ext.c
index ae44f7adbe07..8c78b8d45117 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -398,10 +398,8 @@ void __init page_ext_init(void)
398 * We know some arch can have a nodes layout such as 398 * We know some arch can have a nodes layout such as
399 * -------------pfn--------------> 399 * -------------pfn-------------->
400 * N0 | N1 | N2 | N0 | N1 | N2|.... 400 * N0 | N1 | N2 | N0 | N1 | N2|....
401 *
402 * Take into account DEFERRED_STRUCT_PAGE_INIT.
403 */ 401 */
404 if (early_pfn_to_nid(pfn) != nid) 402 if (pfn_to_nid(pfn) != nid)
405 continue; 403 continue;
406 if (init_section_page_ext(pfn, nid)) 404 if (init_section_page_ext(pfn, nid))
407 goto oom; 405 goto oom;
diff --git a/mm/rmap.c b/mm/rmap.c
index 21a26cf51114..0454ecc29537 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -25,7 +25,6 @@
25 * page->flags PG_locked (lock_page) 25 * page->flags PG_locked (lock_page)
26 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share) 26 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
27 * mapping->i_mmap_rwsem 27 * mapping->i_mmap_rwsem
28 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
29 * anon_vma->rwsem 28 * anon_vma->rwsem
30 * mm->page_table_lock or pte_lock 29 * mm->page_table_lock or pte_lock
31 * zone_lru_lock (in mark_page_accessed, isolate_lru_page) 30 * zone_lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -1372,16 +1371,13 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1372 * Note that the page can not be free in this function as call of 1371 * Note that the page can not be free in this function as call of
1373 * try_to_unmap() must hold a reference on the page. 1372 * try_to_unmap() must hold a reference on the page.
1374 */ 1373 */
1375 mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start, 1374 mmu_notifier_range_init(&range, vma->vm_mm, address,
1376 min(vma->vm_end, vma->vm_start + 1375 min(vma->vm_end, address +
1377 (PAGE_SIZE << compound_order(page)))); 1376 (PAGE_SIZE << compound_order(page))));
1378 if (PageHuge(page)) { 1377 if (PageHuge(page)) {
1379 /* 1378 /*
1380 * If sharing is possible, start and end will be adjusted 1379 * If sharing is possible, start and end will be adjusted
1381 * accordingly. 1380 * accordingly.
1382 *
1383 * If called for a huge page, caller must hold i_mmap_rwsem
1384 * in write mode as it is possible to call huge_pmd_unshare.
1385 */ 1381 */
1386 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1382 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1387 &range.end); 1383 &range.end);
diff --git a/mm/shmem.c b/mm/shmem.c
index 6ece1e2fe76e..0905215fb016 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2854,10 +2854,14 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
2854 * No ordinary (disk based) filesystem counts links as inodes; 2854 * No ordinary (disk based) filesystem counts links as inodes;
2855 * but each new link needs a new dentry, pinning lowmem, and 2855 * but each new link needs a new dentry, pinning lowmem, and
2856 * tmpfs dentries cannot be pruned until they are unlinked. 2856 * tmpfs dentries cannot be pruned until they are unlinked.
2857 * But if an O_TMPFILE file is linked into the tmpfs, the
2858 * first link must skip that, to get the accounting right.
2857 */ 2859 */
2858 ret = shmem_reserve_inode(inode->i_sb); 2860 if (inode->i_nlink) {
2859 if (ret) 2861 ret = shmem_reserve_inode(inode->i_sb);
2860 goto out; 2862 if (ret)
2863 goto out;
2864 }
2861 2865
2862 dir->i_size += BOGO_DIRENT_SIZE; 2866 dir->i_size += BOGO_DIRENT_SIZE;
2863 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 2867 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
diff --git a/mm/slab.c b/mm/slab.c
index 73fe23e649c9..91c1863df93d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -666,8 +666,10 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
666 struct alien_cache *alc = NULL; 666 struct alien_cache *alc = NULL;
667 667
668 alc = kmalloc_node(memsize, gfp, node); 668 alc = kmalloc_node(memsize, gfp, node);
669 init_arraycache(&alc->ac, entries, batch); 669 if (alc) {
670 spin_lock_init(&alc->lock); 670 init_arraycache(&alc->ac, entries, batch);
671 spin_lock_init(&alc->lock);
672 }
671 return alc; 673 return alc;
672} 674}
673 675
@@ -2357,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
2357 void *freelist; 2359 void *freelist;
2358 void *addr = page_address(page); 2360 void *addr = page_address(page);
2359 2361
2360 page->s_mem = kasan_reset_tag(addr) + colour_off; 2362 page->s_mem = addr + colour_off;
2361 page->active = 0; 2363 page->active = 0;
2362 2364
2363 if (OBJFREELIST_SLAB(cachep)) 2365 if (OBJFREELIST_SLAB(cachep))
@@ -2366,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
2366 /* Slab management obj is off-slab. */ 2368 /* Slab management obj is off-slab. */
2367 freelist = kmem_cache_alloc_node(cachep->freelist_cache, 2369 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2368 local_flags, nodeid); 2370 local_flags, nodeid);
2371 freelist = kasan_reset_tag(freelist);
2369 if (!freelist) 2372 if (!freelist)
2370 return NULL; 2373 return NULL;
2371 } else { 2374 } else {
@@ -2679,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
2679 2682
2680 offset *= cachep->colour_off; 2683 offset *= cachep->colour_off;
2681 2684
2685 /*
2686 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
2687 * page_address() in the latter returns a non-tagged pointer,
2688 * as it should be for slab pages.
2689 */
2690 kasan_poison_slab(page);
2691
2682 /* Get slab management. */ 2692 /* Get slab management. */
2683 freelist = alloc_slabmgmt(cachep, page, offset, 2693 freelist = alloc_slabmgmt(cachep, page, offset,
2684 local_flags & ~GFP_CONSTRAINT_MASK, page_node); 2694 local_flags & ~GFP_CONSTRAINT_MASK, page_node);
@@ -2687,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
2687 2697
2688 slab_map_pages(cachep, page, freelist); 2698 slab_map_pages(cachep, page, freelist);
2689 2699
2690 kasan_poison_slab(page);
2691 cache_init_objs(cachep, page); 2700 cache_init_objs(cachep, page);
2692 2701
2693 if (gfpflags_allow_blocking(local_flags)) 2702 if (gfpflags_allow_blocking(local_flags))
@@ -3538,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3538{ 3547{
3539 void *ret = slab_alloc(cachep, flags, _RET_IP_); 3548 void *ret = slab_alloc(cachep, flags, _RET_IP_);
3540 3549
3541 ret = kasan_slab_alloc(cachep, ret, flags);
3542 trace_kmem_cache_alloc(_RET_IP_, ret, 3550 trace_kmem_cache_alloc(_RET_IP_, ret,
3543 cachep->object_size, cachep->size, flags); 3551 cachep->object_size, cachep->size, flags);
3544 3552
@@ -3628,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3628{ 3636{
3629 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3637 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3630 3638
3631 ret = kasan_slab_alloc(cachep, ret, flags);
3632 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3639 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3633 cachep->object_size, cachep->size, 3640 cachep->object_size, cachep->size,
3634 flags, nodeid); 3641 flags, nodeid);
@@ -4406,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4406 unsigned int objnr; 4413 unsigned int objnr;
4407 unsigned long offset; 4414 unsigned long offset;
4408 4415
4416 ptr = kasan_reset_tag(ptr);
4417
4409 /* Find and validate object. */ 4418 /* Find and validate object. */
4410 cachep = page->slab_cache; 4419 cachep = page->slab_cache;
4411 objnr = obj_to_index(cachep, page, (void *)ptr); 4420 objnr = obj_to_index(cachep, page, (void *)ptr);
diff --git a/mm/slab.h b/mm/slab.h
index 4190c24ef0e9..384105318779 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -437,11 +437,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
437 437
438 flags &= gfp_allowed_mask; 438 flags &= gfp_allowed_mask;
439 for (i = 0; i < size; i++) { 439 for (i = 0; i < size; i++) {
440 void *object = p[i]; 440 p[i] = kasan_slab_alloc(s, p[i], flags);
441 441 /* As p[i] might get tagged, call kmemleak hook after KASAN. */
442 kmemleak_alloc_recursive(object, s->object_size, 1, 442 kmemleak_alloc_recursive(p[i], s->object_size, 1,
443 s->flags, flags); 443 s->flags, flags);
444 p[i] = kasan_slab_alloc(s, object, flags);
445 } 444 }
446 445
447 if (memcg_kmem_enabled()) 446 if (memcg_kmem_enabled())
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 81732d05e74a..f9d89c1b5977 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1228,8 +1228,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
1228 flags |= __GFP_COMP; 1228 flags |= __GFP_COMP;
1229 page = alloc_pages(flags, order); 1229 page = alloc_pages(flags, order);
1230 ret = page ? page_address(page) : NULL; 1230 ret = page ? page_address(page) : NULL;
1231 kmemleak_alloc(ret, size, 1, flags);
1232 ret = kasan_kmalloc_large(ret, size, flags); 1231 ret = kasan_kmalloc_large(ret, size, flags);
1232 /* As ret might get tagged, call kmemleak hook after KASAN. */
1233 kmemleak_alloc(ret, size, 1, flags);
1233 return ret; 1234 return ret;
1234} 1235}
1235EXPORT_SYMBOL(kmalloc_order); 1236EXPORT_SYMBOL(kmalloc_order);
diff --git a/mm/slub.c b/mm/slub.c
index 36c0befeebd8..dc777761b6b7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
249 unsigned long ptr_addr) 249 unsigned long ptr_addr)
250{ 250{
251#ifdef CONFIG_SLAB_FREELIST_HARDENED 251#ifdef CONFIG_SLAB_FREELIST_HARDENED
252 return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr); 252 /*
253 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
254 * Normally, this doesn't cause any issues, as both set_freepointer()
255 * and get_freepointer() are called with a pointer with the same tag.
256 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
257 * example, when __free_slub() iterates over objects in a cache, it
258 * passes untagged pointers to check_object(). check_object() in turns
259 * calls get_freepointer() with an untagged pointer, which causes the
260 * freepointer to be restored incorrectly.
261 */
262 return (void *)((unsigned long)ptr ^ s->random ^
263 (unsigned long)kasan_reset_tag((void *)ptr_addr));
253#else 264#else
254 return ptr; 265 return ptr;
255#endif 266#endif
@@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
303 __p < (__addr) + (__objects) * (__s)->size; \ 314 __p < (__addr) + (__objects) * (__s)->size; \
304 __p += (__s)->size) 315 __p += (__s)->size)
305 316
306#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
307 for (__p = fixup_red_left(__s, __addr), __idx = 1; \
308 __idx <= __objects; \
309 __p += (__s)->size, __idx++)
310
311/* Determine object index from a given position */ 317/* Determine object index from a given position */
312static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) 318static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
313{ 319{
314 return (p - addr) / s->size; 320 return (kasan_reset_tag(p) - addr) / s->size;
315} 321}
316 322
317static inline unsigned int order_objects(unsigned int order, unsigned int size) 323static inline unsigned int order_objects(unsigned int order, unsigned int size)
@@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
507 return 1; 513 return 1;
508 514
509 base = page_address(page); 515 base = page_address(page);
516 object = kasan_reset_tag(object);
510 object = restore_red_left(s, object); 517 object = restore_red_left(s, object);
511 if (object < base || object >= base + page->objects * s->size || 518 if (object < base || object >= base + page->objects * s->size ||
512 (object - base) % s->size) { 519 (object - base) % s->size) {
@@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
1075 init_tracking(s, object); 1082 init_tracking(s, object);
1076} 1083}
1077 1084
1085static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
1086{
1087 if (!(s->flags & SLAB_POISON))
1088 return;
1089
1090 metadata_access_enable();
1091 memset(addr, POISON_INUSE, PAGE_SIZE << order);
1092 metadata_access_disable();
1093}
1094
1078static inline int alloc_consistency_checks(struct kmem_cache *s, 1095static inline int alloc_consistency_checks(struct kmem_cache *s,
1079 struct page *page, 1096 struct page *page,
1080 void *object, unsigned long addr) 1097 void *object, unsigned long addr)
@@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
1330#else /* !CONFIG_SLUB_DEBUG */ 1347#else /* !CONFIG_SLUB_DEBUG */
1331static inline void setup_object_debug(struct kmem_cache *s, 1348static inline void setup_object_debug(struct kmem_cache *s,
1332 struct page *page, void *object) {} 1349 struct page *page, void *object) {}
1350static inline void setup_page_debug(struct kmem_cache *s,
1351 void *addr, int order) {}
1333 1352
1334static inline int alloc_debug_processing(struct kmem_cache *s, 1353static inline int alloc_debug_processing(struct kmem_cache *s,
1335 struct page *page, void *object, unsigned long addr) { return 0; } 1354 struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1374,8 +1393,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
1374 */ 1393 */
1375static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) 1394static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1376{ 1395{
1396 ptr = kasan_kmalloc_large(ptr, size, flags);
1397 /* As ptr might get tagged, call kmemleak hook after KASAN. */
1377 kmemleak_alloc(ptr, size, 1, flags); 1398 kmemleak_alloc(ptr, size, 1, flags);
1378 return kasan_kmalloc_large(ptr, size, flags); 1399 return ptr;
1379} 1400}
1380 1401
1381static __always_inline void kfree_hook(void *x) 1402static __always_inline void kfree_hook(void *x)
@@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1641 if (page_is_pfmemalloc(page)) 1662 if (page_is_pfmemalloc(page))
1642 SetPageSlabPfmemalloc(page); 1663 SetPageSlabPfmemalloc(page);
1643 1664
1644 start = page_address(page); 1665 kasan_poison_slab(page);
1645 1666
1646 if (unlikely(s->flags & SLAB_POISON)) 1667 start = page_address(page);
1647 memset(start, POISON_INUSE, PAGE_SIZE << order);
1648 1668
1649 kasan_poison_slab(page); 1669 setup_page_debug(s, start, order);
1650 1670
1651 shuffle = shuffle_freelist(s, page); 1671 shuffle = shuffle_freelist(s, page);
1652 1672
1653 if (!shuffle) { 1673 if (!shuffle) {
1654 for_each_object_idx(p, idx, s, start, page->objects) {
1655 if (likely(idx < page->objects)) {
1656 next = p + s->size;
1657 next = setup_object(s, page, next);
1658 set_freepointer(s, p, next);
1659 } else
1660 set_freepointer(s, p, NULL);
1661 }
1662 start = fixup_red_left(s, start); 1674 start = fixup_red_left(s, start);
1663 start = setup_object(s, page, start); 1675 start = setup_object(s, page, start);
1664 page->freelist = start; 1676 page->freelist = start;
1677 for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1678 next = p + s->size;
1679 next = setup_object(s, page, next);
1680 set_freepointer(s, p, next);
1681 p = next;
1682 }
1683 set_freepointer(s, p, NULL);
1665 } 1684 }
1666 1685
1667 page->inuse = page->objects; 1686 page->inuse = page->objects;
@@ -3846,6 +3865,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3846 unsigned int offset; 3865 unsigned int offset;
3847 size_t object_size; 3866 size_t object_size;
3848 3867
3868 ptr = kasan_reset_tag(ptr);
3869
3849 /* Find object and usable object size. */ 3870 /* Find object and usable object size. */
3850 s = page->slab_cache; 3871 s = page->slab_cache;
3851 3872
diff --git a/mm/swap.c b/mm/swap.c
index 4929bc1be60e..4d7d37eb3c40 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu)
320{ 320{
321} 321}
322 322
323static bool need_activate_page_drain(int cpu)
324{
325 return false;
326}
327
328void activate_page(struct page *page) 323void activate_page(struct page *page)
329{ 324{
330 struct zone *zone = page_zone(page); 325 struct zone *zone = page_zone(page);
@@ -653,13 +648,15 @@ void lru_add_drain(void)
653 put_cpu(); 648 put_cpu();
654} 649}
655 650
651#ifdef CONFIG_SMP
652
653static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
654
656static void lru_add_drain_per_cpu(struct work_struct *dummy) 655static void lru_add_drain_per_cpu(struct work_struct *dummy)
657{ 656{
658 lru_add_drain(); 657 lru_add_drain();
659} 658}
660 659
661static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
662
663/* 660/*
664 * Doesn't need any cpu hotplug locking because we do rely on per-cpu 661 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
665 * kworkers being shut down before our page_alloc_cpu_dead callback is 662 * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -702,6 +699,12 @@ void lru_add_drain_all(void)
702 699
703 mutex_unlock(&lock); 700 mutex_unlock(&lock);
704} 701}
702#else
703void lru_add_drain_all(void)
704{
705 lru_add_drain();
706}
707#endif
705 708
706/** 709/**
707 * release_pages - batched put_page() 710 * release_pages - batched put_page()
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 852eb4e53f06..14faadcedd06 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -247,7 +247,8 @@ static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
247/* 247/*
248 * Validates that the given object is: 248 * Validates that the given object is:
249 * - not bogus address 249 * - not bogus address
250 * - known-safe heap or stack object 250 * - fully contained by stack (or stack frame, when available)
251 * - fully within SLAB object (or object whitelist area, when available)
251 * - not in kernel text 252 * - not in kernel text
252 */ 253 */
253void __check_object_size(const void *ptr, unsigned long n, bool to_user) 254void __check_object_size(const void *ptr, unsigned long n, bool to_user)
@@ -262,9 +263,6 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
262 /* Check for invalid addresses. */ 263 /* Check for invalid addresses. */
263 check_bogus_address((const unsigned long)ptr, n, to_user); 264 check_bogus_address((const unsigned long)ptr, n, to_user);
264 265
265 /* Check for bad heap object. */
266 check_heap_object(ptr, n, to_user);
267
268 /* Check for bad stack object. */ 266 /* Check for bad stack object. */
269 switch (check_stack_object(ptr, n)) { 267 switch (check_stack_object(ptr, n)) {
270 case NOT_STACK: 268 case NOT_STACK:
@@ -282,6 +280,9 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
282 usercopy_abort("process stack", NULL, to_user, 0, n); 280 usercopy_abort("process stack", NULL, to_user, 0, n);
283 } 281 }
284 282
283 /* Check for bad heap object. */
284 check_heap_object(ptr, n, to_user);
285
285 /* Check for object in kernel to avoid text exposure. */ 286 /* Check for object in kernel to avoid text exposure. */
286 check_kernel_text_object((const unsigned long)ptr, n, to_user); 287 check_kernel_text_object((const unsigned long)ptr, n, to_user);
287} 288}
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 065c1ce191c4..d59b5a73dfb3 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -267,14 +267,10 @@ retry:
267 VM_BUG_ON(dst_addr & ~huge_page_mask(h)); 267 VM_BUG_ON(dst_addr & ~huge_page_mask(h));
268 268
269 /* 269 /*
270 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex. 270 * Serialize via hugetlb_fault_mutex
271 * i_mmap_rwsem ensures the dst_pte remains valid even
272 * in the case of shared pmds. fault mutex prevents
273 * races with other faulting threads.
274 */ 271 */
275 mapping = dst_vma->vm_file->f_mapping;
276 i_mmap_lock_read(mapping);
277 idx = linear_page_index(dst_vma, dst_addr); 272 idx = linear_page_index(dst_vma, dst_addr);
273 mapping = dst_vma->vm_file->f_mapping;
278 hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping, 274 hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
279 idx, dst_addr); 275 idx, dst_addr);
280 mutex_lock(&hugetlb_fault_mutex_table[hash]); 276 mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -283,7 +279,6 @@ retry:
283 dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h)); 279 dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
284 if (!dst_pte) { 280 if (!dst_pte) {
285 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 281 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
286 i_mmap_unlock_read(mapping);
287 goto out_unlock; 282 goto out_unlock;
288 } 283 }
289 284
@@ -291,7 +286,6 @@ retry:
291 dst_pteval = huge_ptep_get(dst_pte); 286 dst_pteval = huge_ptep_get(dst_pte);
292 if (!huge_pte_none(dst_pteval)) { 287 if (!huge_pte_none(dst_pteval)) {
293 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 288 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
294 i_mmap_unlock_read(mapping);
295 goto out_unlock; 289 goto out_unlock;
296 } 290 }
297 291
@@ -299,7 +293,6 @@ retry:
299 dst_addr, src_addr, &page); 293 dst_addr, src_addr, &page);
300 294
301 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 295 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
302 i_mmap_unlock_read(mapping);
303 vm_alloc_shared = vm_shared; 296 vm_alloc_shared = vm_shared;
304 297
305 cond_resched(); 298 cond_resched();
diff --git a/mm/util.c b/mm/util.c
index 4df23d64aac7..379319b1bcfd 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len)
150{ 150{
151 void *p; 151 void *p;
152 152
153 p = kmalloc_track_caller(len, GFP_USER); 153 p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
154 if (!p) 154 if (!p)
155 return ERR_PTR(-ENOMEM); 155 return ERR_PTR(-ENOMEM);
156 156
@@ -478,7 +478,7 @@ bool page_mapped(struct page *page)
478 return true; 478 return true;
479 if (PageHuge(page)) 479 if (PageHuge(page))
480 return false; 480 return false;
481 for (i = 0; i < hpage_nr_pages(page); i++) { 481 for (i = 0; i < (1 << compound_order(page)); i++) {
482 if (atomic_read(&page[i]._mapcount) >= 0) 482 if (atomic_read(&page[i]._mapcount) >= 0)
483 return true; 483 return true;
484 } 484 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a714c4f800e9..e979705bbf32 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
491 delta = freeable / 2; 491 delta = freeable / 2;
492 } 492 }
493 493
494 /*
495 * Make sure we apply some minimal pressure on default priority
496 * even on small cgroups. Stale objects are not only consuming memory
497 * by themselves, but can also hold a reference to a dying cgroup,
498 * preventing it from being reclaimed. A dying cgroup with all
499 * corresponding structures like per-cpu stats and kmem caches
500 * can be really big, so it may lead to a significant waste of memory.
501 */
502 delta = max_t(unsigned long long, delta, min(freeable, batch_size));
503
504 total_scan += delta; 494 total_scan += delta;
505 if (total_scan < 0) { 495 if (total_scan < 0) {
506 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", 496 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 70417e9b932d..314bbc8010fb 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
114 dst = (ax25_address *)(bp + 1); 114 dst = (ax25_address *)(bp + 1);
115 src = (ax25_address *)(bp + 8); 115 src = (ax25_address *)(bp + 8);
116 116
117 ax25_route_lock_use();
117 route = ax25_get_route(dst, NULL); 118 route = ax25_get_route(dst, NULL);
118 if (route) { 119 if (route) {
119 digipeat = route->digipeat; 120 digipeat = route->digipeat;
@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
206 ax25_queue_xmit(skb, dev); 207 ax25_queue_xmit(skb, dev);
207 208
208put: 209put:
209 if (route)
210 ax25_put_route(route);
211 210
211 ax25_route_lock_unuse();
212 return NETDEV_TX_OK; 212 return NETDEV_TX_OK;
213} 213}
214 214
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a0eff323af12..66f74c85cf6b 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -40,7 +40,7 @@
40#include <linux/export.h> 40#include <linux/export.h>
41 41
42static ax25_route *ax25_route_list; 42static ax25_route *ax25_route_list;
43static DEFINE_RWLOCK(ax25_route_lock); 43DEFINE_RWLOCK(ax25_route_lock);
44 44
45void ax25_rt_device_down(struct net_device *dev) 45void ax25_rt_device_down(struct net_device *dev)
46{ 46{
@@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = {
335 * Find AX.25 route 335 * Find AX.25 route
336 * 336 *
337 * Only routes with a reference count of zero can be destroyed. 337 * Only routes with a reference count of zero can be destroyed.
338 * Must be called with ax25_route_lock read locked.
338 */ 339 */
339ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) 340ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
340{ 341{
@@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
342 ax25_route *ax25_def_rt = NULL; 343 ax25_route *ax25_def_rt = NULL;
343 ax25_route *ax25_rt; 344 ax25_route *ax25_rt;
344 345
345 read_lock(&ax25_route_lock);
346 /* 346 /*
347 * Bind to the physical interface we heard them on, or the default 347 * Bind to the physical interface we heard them on, or the default
348 * route if none is found; 348 * route if none is found;
@@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
365 if (ax25_spe_rt != NULL) 365 if (ax25_spe_rt != NULL)
366 ax25_rt = ax25_spe_rt; 366 ax25_rt = ax25_spe_rt;
367 367
368 if (ax25_rt != NULL)
369 ax25_hold_route(ax25_rt);
370
371 read_unlock(&ax25_route_lock);
372
373 return ax25_rt; 368 return ax25_rt;
374} 369}
375 370
@@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
400 ax25_route *ax25_rt; 395 ax25_route *ax25_rt;
401 int err = 0; 396 int err = 0;
402 397
403 if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) 398 ax25_route_lock_use();
399 ax25_rt = ax25_get_route(addr, NULL);
400 if (!ax25_rt) {
401 ax25_route_lock_unuse();
404 return -EHOSTUNREACH; 402 return -EHOSTUNREACH;
405 403 }
406 if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { 404 if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
407 err = -EHOSTUNREACH; 405 err = -EHOSTUNREACH;
408 goto put; 406 goto put;
@@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
437 } 435 }
438 436
439put: 437put:
440 ax25_put_route(ax25_rt); 438 ax25_route_lock_unuse();
441
442 return err; 439 return err;
443} 440}
444 441
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index e8090f099eb8..ef0dec20c7d8 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
104 104
105 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); 105 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
106 106
107 /* free the TID stats immediately */
108 cfg80211_sinfo_release_content(&sinfo);
109
107 dev_put(real_netdev); 110 dev_put(real_netdev);
108 if (ret == -ENOENT) { 111 if (ret == -ENOENT) {
109 /* Node is not associated anymore! It would be 112 /* Node is not associated anymore! It would be
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 508f4416dfc9..415d494cbe22 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -20,7 +20,6 @@
20#include "main.h" 20#include "main.h"
21 21
22#include <linux/atomic.h> 22#include <linux/atomic.h>
23#include <linux/bug.h>
24#include <linux/byteorder/generic.h> 23#include <linux/byteorder/generic.h>
25#include <linux/errno.h> 24#include <linux/errno.h>
26#include <linux/gfp.h> 25#include <linux/gfp.h>
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
179 parent_dev = __dev_get_by_index((struct net *)parent_net, 178 parent_dev = __dev_get_by_index((struct net *)parent_net,
180 dev_get_iflink(net_dev)); 179 dev_get_iflink(net_dev));
181 /* if we got a NULL parent_dev there is something broken.. */ 180 /* if we got a NULL parent_dev there is something broken.. */
182 if (WARN(!parent_dev, "Cannot find parent device")) 181 if (!parent_dev) {
182 pr_err("Cannot find parent device\n");
183 return false; 183 return false;
184 }
184 185
185 if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) 186 if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
186 return false; 187 return false;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5db5a0a4c959..ffc83bebfe40 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -221,10 +221,14 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
221 221
222 netif_trans_update(soft_iface); 222 netif_trans_update(soft_iface);
223 vid = batadv_get_vid(skb, 0); 223 vid = batadv_get_vid(skb, 0);
224
225 skb_reset_mac_header(skb);
224 ethhdr = eth_hdr(skb); 226 ethhdr = eth_hdr(skb);
225 227
226 switch (ntohs(ethhdr->h_proto)) { 228 switch (ntohs(ethhdr->h_proto)) {
227 case ETH_P_8021Q: 229 case ETH_P_8021Q:
230 if (!pskb_may_pull(skb, sizeof(*vhdr)))
231 goto dropped;
228 vhdr = vlan_eth_hdr(skb); 232 vhdr = vlan_eth_hdr(skb);
229 233
230 /* drop batman-in-batman packets to prevent loops */ 234 /* drop batman-in-batman packets to prevent loops */
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..e31e1b20f7f4 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -13,27 +13,13 @@
13#include <net/sock.h> 13#include <net/sock.h>
14#include <net/tcp.h> 14#include <net/tcp.h>
15 15
16static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, 16static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
17 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) 17 u32 *retval, u32 *time)
18{
19 u32 ret;
20
21 preempt_disable();
22 rcu_read_lock();
23 bpf_cgroup_storage_set(storage);
24 ret = BPF_PROG_RUN(prog, ctx);
25 rcu_read_unlock();
26 preempt_enable();
27
28 return ret;
29}
30
31static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
32 u32 *time)
33{ 18{
34 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; 19 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
35 enum bpf_cgroup_storage_type stype; 20 enum bpf_cgroup_storage_type stype;
36 u64 time_start, time_spent = 0; 21 u64 time_start, time_spent = 0;
22 int ret = 0;
37 u32 i; 23 u32 i;
38 24
39 for_each_cgroup_storage_type(stype) { 25 for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
48 34
49 if (!repeat) 35 if (!repeat)
50 repeat = 1; 36 repeat = 1;
37
38 rcu_read_lock();
39 preempt_disable();
51 time_start = ktime_get_ns(); 40 time_start = ktime_get_ns();
52 for (i = 0; i < repeat; i++) { 41 for (i = 0; i < repeat; i++) {
53 *ret = bpf_test_run_one(prog, ctx, storage); 42 bpf_cgroup_storage_set(storage);
43 *retval = BPF_PROG_RUN(prog, ctx);
44
45 if (signal_pending(current)) {
46 ret = -EINTR;
47 break;
48 }
49
54 if (need_resched()) { 50 if (need_resched()) {
55 if (signal_pending(current))
56 break;
57 time_spent += ktime_get_ns() - time_start; 51 time_spent += ktime_get_ns() - time_start;
52 preempt_enable();
53 rcu_read_unlock();
54
58 cond_resched(); 55 cond_resched();
56
57 rcu_read_lock();
58 preempt_disable();
59 time_start = ktime_get_ns(); 59 time_start = ktime_get_ns();
60 } 60 }
61 } 61 }
62 time_spent += ktime_get_ns() - time_start; 62 time_spent += ktime_get_ns() - time_start;
63 preempt_enable();
64 rcu_read_unlock();
65
63 do_div(time_spent, repeat); 66 do_div(time_spent, repeat);
64 *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; 67 *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
65 68
66 for_each_cgroup_storage_type(stype) 69 for_each_cgroup_storage_type(stype)
67 bpf_cgroup_storage_free(storage[stype]); 70 bpf_cgroup_storage_free(storage[stype]);
68 71
69 return 0; 72 return ret;
70} 73}
71 74
72static int bpf_test_finish(const union bpf_attr *kattr, 75static int bpf_test_finish(const union bpf_attr *kattr,
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index 7acfc83087d5..7ee4fea93637 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -13,39 +13,24 @@
13extern char bpfilter_umh_start; 13extern char bpfilter_umh_start;
14extern char bpfilter_umh_end; 14extern char bpfilter_umh_end;
15 15
16static struct umh_info info; 16static void shutdown_umh(void)
17/* since ip_getsockopt() can run in parallel, serialize access to umh */
18static DEFINE_MUTEX(bpfilter_lock);
19
20static void shutdown_umh(struct umh_info *info)
21{ 17{
22 struct task_struct *tsk; 18 struct task_struct *tsk;
23 19
24 if (!info->pid) 20 if (bpfilter_ops.stop)
25 return; 21 return;
26 tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID); 22
23 tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID);
27 if (tsk) { 24 if (tsk) {
28 force_sig(SIGKILL, tsk); 25 force_sig(SIGKILL, tsk);
29 put_task_struct(tsk); 26 put_task_struct(tsk);
30 } 27 }
31 fput(info->pipe_to_umh);
32 fput(info->pipe_from_umh);
33 info->pid = 0;
34} 28}
35 29
36static void __stop_umh(void) 30static void __stop_umh(void)
37{ 31{
38 if (IS_ENABLED(CONFIG_INET)) { 32 if (IS_ENABLED(CONFIG_INET))
39 bpfilter_process_sockopt = NULL; 33 shutdown_umh();
40 shutdown_umh(&info);
41 }
42}
43
44static void stop_umh(void)
45{
46 mutex_lock(&bpfilter_lock);
47 __stop_umh();
48 mutex_unlock(&bpfilter_lock);
49} 34}
50 35
51static int __bpfilter_process_sockopt(struct sock *sk, int optname, 36static int __bpfilter_process_sockopt(struct sock *sk, int optname,
@@ -63,10 +48,10 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
63 req.cmd = optname; 48 req.cmd = optname;
64 req.addr = (long __force __user)optval; 49 req.addr = (long __force __user)optval;
65 req.len = optlen; 50 req.len = optlen;
66 mutex_lock(&bpfilter_lock); 51 if (!bpfilter_ops.info.pid)
67 if (!info.pid)
68 goto out; 52 goto out;
69 n = __kernel_write(info.pipe_to_umh, &req, sizeof(req), &pos); 53 n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
54 &pos);
70 if (n != sizeof(req)) { 55 if (n != sizeof(req)) {
71 pr_err("write fail %zd\n", n); 56 pr_err("write fail %zd\n", n);
72 __stop_umh(); 57 __stop_umh();
@@ -74,7 +59,8 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
74 goto out; 59 goto out;
75 } 60 }
76 pos = 0; 61 pos = 0;
77 n = kernel_read(info.pipe_from_umh, &reply, sizeof(reply), &pos); 62 n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply),
63 &pos);
78 if (n != sizeof(reply)) { 64 if (n != sizeof(reply)) {
79 pr_err("read fail %zd\n", n); 65 pr_err("read fail %zd\n", n);
80 __stop_umh(); 66 __stop_umh();
@@ -83,37 +69,59 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
83 } 69 }
84 ret = reply.status; 70 ret = reply.status;
85out: 71out:
86 mutex_unlock(&bpfilter_lock);
87 return ret; 72 return ret;
88} 73}
89 74
90static int __init load_umh(void) 75static int start_umh(void)
91{ 76{
92 int err; 77 int err;
93 78
94 /* fork usermode process */ 79 /* fork usermode process */
95 info.cmdline = "bpfilter_umh";
96 err = fork_usermode_blob(&bpfilter_umh_start, 80 err = fork_usermode_blob(&bpfilter_umh_start,
97 &bpfilter_umh_end - &bpfilter_umh_start, 81 &bpfilter_umh_end - &bpfilter_umh_start,
98 &info); 82 &bpfilter_ops.info);
99 if (err) 83 if (err)
100 return err; 84 return err;
101 pr_info("Loaded bpfilter_umh pid %d\n", info.pid); 85 bpfilter_ops.stop = false;
86 pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid);
102 87
103 /* health check that usermode process started correctly */ 88 /* health check that usermode process started correctly */
104 if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { 89 if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
105 stop_umh(); 90 shutdown_umh();
106 return -EFAULT; 91 return -EFAULT;
107 } 92 }
108 if (IS_ENABLED(CONFIG_INET))
109 bpfilter_process_sockopt = &__bpfilter_process_sockopt;
110 93
111 return 0; 94 return 0;
112} 95}
113 96
97static int __init load_umh(void)
98{
99 int err;
100
101 mutex_lock(&bpfilter_ops.lock);
102 if (!bpfilter_ops.stop) {
103 err = -EFAULT;
104 goto out;
105 }
106 err = start_umh();
107 if (!err && IS_ENABLED(CONFIG_INET)) {
108 bpfilter_ops.sockopt = &__bpfilter_process_sockopt;
109 bpfilter_ops.start = &start_umh;
110 }
111out:
112 mutex_unlock(&bpfilter_ops.lock);
113 return err;
114}
115
114static void __exit fini_umh(void) 116static void __exit fini_umh(void)
115{ 117{
116 stop_umh(); 118 mutex_lock(&bpfilter_ops.lock);
119 if (IS_ENABLED(CONFIG_INET)) {
120 shutdown_umh();
121 bpfilter_ops.start = NULL;
122 bpfilter_ops.sockopt = NULL;
123 }
124 mutex_unlock(&bpfilter_ops.lock);
117} 125}
118module_init(load_umh); 126module_init(load_umh);
119module_exit(fini_umh); 127module_exit(fini_umh);
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
index 40311d10d2f2..9ea6100dca87 100644
--- a/net/bpfilter/bpfilter_umh_blob.S
+++ b/net/bpfilter/bpfilter_umh_blob.S
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2 .section .init.rodata, "a" 2 .section .rodata, "a"
3 .global bpfilter_umh_start 3 .global bpfilter_umh_start
4bpfilter_umh_start: 4bpfilter_umh_start:
5 .incbin "net/bpfilter/bpfilter_umh" 5 .incbin "net/bpfilter/bpfilter_umh"
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index fe3c758791ca..9e14767500ea 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -1128,6 +1128,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1128 err = -ENOMEM; 1128 err = -ENOMEM;
1129 goto err_unlock; 1129 goto err_unlock;
1130 } 1130 }
1131 if (swdev_notify)
1132 fdb->added_by_user = 1;
1131 fdb->added_by_external_learn = 1; 1133 fdb->added_by_external_learn = 1;
1132 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); 1134 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1133 } else { 1135 } else {
@@ -1147,6 +1149,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1147 modified = true; 1149 modified = true;
1148 } 1150 }
1149 1151
1152 if (swdev_notify)
1153 fdb->added_by_user = 1;
1154
1150 if (modified) 1155 if (modified)
1151 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); 1156 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1152 } 1157 }
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 5372e2042adf..48ddc60b4fbd 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
36 36
37int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 37int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
38{ 38{
39 skb_push(skb, ETH_HLEN);
39 if (!is_skb_forwardable(skb->dev, skb)) 40 if (!is_skb_forwardable(skb->dev, skb))
40 goto drop; 41 goto drop;
41 42
42 skb_push(skb, ETH_HLEN);
43 br_drop_fake_rtable(skb); 43 br_drop_fake_rtable(skb);
44 44
45 if (skb->ip_summed == CHECKSUM_PARTIAL && 45 if (skb->ip_summed == CHECKSUM_PARTIAL &&
@@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
65 65
66int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 66int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
67{ 67{
68 skb->tstamp = 0;
68 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, 69 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
69 net, sk, skb, NULL, skb->dev, 70 net, sk, skb, NULL, skb->dev,
70 br_dev_queue_push_xmit); 71 br_dev_queue_push_xmit);
@@ -97,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to,
97 net = dev_net(indev); 98 net = dev_net(indev);
98 } else { 99 } else {
99 if (unlikely(netpoll_tx_running(to->br->dev))) { 100 if (unlikely(netpoll_tx_running(to->br->dev))) {
100 if (!is_skb_forwardable(skb->dev, skb)) { 101 skb_push(skb, ETH_HLEN);
102 if (!is_skb_forwardable(skb->dev, skb))
101 kfree_skb(skb); 103 kfree_skb(skb);
102 } else { 104 else
103 skb_push(skb, ETH_HLEN);
104 br_netpoll_send_skb(to, skb); 105 br_netpoll_send_skb(to, skb);
105 }
106 return; 106 return;
107 } 107 }
108 br_hook = NF_BR_LOCAL_OUT; 108 br_hook = NF_BR_LOCAL_OUT;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3aeff0895669..ac92b2eb32b1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1204,14 +1204,7 @@ static void br_multicast_query_received(struct net_bridge *br,
1204 return; 1204 return;
1205 1205
1206 br_multicast_update_query_timer(br, query, max_delay); 1206 br_multicast_update_query_timer(br, query, max_delay);
1207 1207 br_multicast_mark_router(br, port);
1208 /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
1209 * the arrival port for IGMP Queries where the source address
1210 * is 0.0.0.0 should not be added to router port list.
1211 */
1212 if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
1213 saddr->proto == htons(ETH_P_IPV6))
1214 br_multicast_mark_router(br, port);
1215} 1208}
1216 1209
1217static void br_ip4_multicast_query(struct net_bridge *br, 1210static void br_ip4_multicast_query(struct net_bridge *br,
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index d21a23698410..c93c35bb73dd 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -265,7 +265,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
265 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); 265 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
266 int ret; 266 int ret;
267 267
268 if (neigh->hh.hh_len) { 268 if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
269 neigh_hh_bridge(&neigh->hh, skb); 269 neigh_hh_bridge(&neigh->hh, skb);
270 skb->dev = nf_bridge->physindev; 270 skb->dev = nf_bridge->physindev;
271 ret = br_handle_frame_finish(net, sk, skb); 271 ret = br_handle_frame_finish(net, sk, skb);
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 94039f588f1d..564710f88f93 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
131 IPSTATS_MIB_INDISCARDS); 131 IPSTATS_MIB_INDISCARDS);
132 goto drop; 132 goto drop;
133 } 133 }
134 hdr = ipv6_hdr(skb);
134 } 135 }
135 if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) 136 if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
136 goto drop; 137 goto drop;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d240b3e7919f..eabf8bf28a3f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -107,6 +107,7 @@ struct br_tunnel_info {
107/* private vlan flags */ 107/* private vlan flags */
108enum { 108enum {
109 BR_VLFLAG_PER_PORT_STATS = BIT(0), 109 BR_VLFLAG_PER_PORT_STATS = BIT(0),
110 BR_VLFLAG_ADDED_BY_SWITCHDEV = BIT(1),
110}; 111};
111 112
112/** 113/**
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4a2f31157ef5..96abf8feb9dc 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -80,16 +80,18 @@ static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
80} 80}
81 81
82static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br, 82static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
83 u16 vid, u16 flags, struct netlink_ext_ack *extack) 83 struct net_bridge_vlan *v, u16 flags,
84 struct netlink_ext_ack *extack)
84{ 85{
85 int err; 86 int err;
86 87
87 /* Try switchdev op first. In case it is not supported, fallback to 88 /* Try switchdev op first. In case it is not supported, fallback to
88 * 8021q add. 89 * 8021q add.
89 */ 90 */
90 err = br_switchdev_port_vlan_add(dev, vid, flags, extack); 91 err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
91 if (err == -EOPNOTSUPP) 92 if (err == -EOPNOTSUPP)
92 return vlan_vid_add(dev, br->vlan_proto, vid); 93 return vlan_vid_add(dev, br->vlan_proto, v->vid);
94 v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
93 return err; 95 return err;
94} 96}
95 97
@@ -121,19 +123,17 @@ static void __vlan_del_list(struct net_bridge_vlan *v)
121} 123}
122 124
123static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br, 125static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
124 u16 vid) 126 const struct net_bridge_vlan *v)
125{ 127{
126 int err; 128 int err;
127 129
128 /* Try switchdev op first. In case it is not supported, fallback to 130 /* Try switchdev op first. In case it is not supported, fallback to
129 * 8021q del. 131 * 8021q del.
130 */ 132 */
131 err = br_switchdev_port_vlan_del(dev, vid); 133 err = br_switchdev_port_vlan_del(dev, v->vid);
132 if (err == -EOPNOTSUPP) { 134 if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
133 vlan_vid_del(dev, br->vlan_proto, vid); 135 vlan_vid_del(dev, br->vlan_proto, v->vid);
134 return 0; 136 return err == -EOPNOTSUPP ? 0 : err;
135 }
136 return err;
137} 137}
138 138
139/* Returns a master vlan, if it didn't exist it gets created. In all cases a 139/* Returns a master vlan, if it didn't exist it gets created. In all cases a
@@ -242,7 +242,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
242 * This ensures tagged traffic enters the bridge when 242 * This ensures tagged traffic enters the bridge when
243 * promiscuous mode is disabled by br_manage_promisc(). 243 * promiscuous mode is disabled by br_manage_promisc().
244 */ 244 */
245 err = __vlan_vid_add(dev, br, v->vid, flags, extack); 245 err = __vlan_vid_add(dev, br, v, flags, extack);
246 if (err) 246 if (err)
247 goto out; 247 goto out;
248 248
@@ -305,7 +305,7 @@ out_fdb_insert:
305 305
306out_filt: 306out_filt:
307 if (p) { 307 if (p) {
308 __vlan_vid_del(dev, br, v->vid); 308 __vlan_vid_del(dev, br, v);
309 if (masterv) { 309 if (masterv) {
310 if (v->stats && masterv->stats != v->stats) 310 if (v->stats && masterv->stats != v->stats)
311 free_percpu(v->stats); 311 free_percpu(v->stats);
@@ -338,7 +338,7 @@ static int __vlan_del(struct net_bridge_vlan *v)
338 338
339 __vlan_delete_pvid(vg, v->vid); 339 __vlan_delete_pvid(vg, v->vid);
340 if (p) { 340 if (p) {
341 err = __vlan_vid_del(p->dev, p->br, v->vid); 341 err = __vlan_vid_del(p->dev, p->br, v);
342 if (err) 342 if (err)
343 goto out; 343 goto out;
344 } else { 344 } else {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 491828713e0b..6693e209efe8 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user,
1137 tmp.name[sizeof(tmp.name) - 1] = 0; 1137 tmp.name[sizeof(tmp.name) - 1] = 0;
1138 1138
1139 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 1139 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1140 newinfo = vmalloc(sizeof(*newinfo) + countersize); 1140 newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
1141 PAGE_KERNEL);
1141 if (!newinfo) 1142 if (!newinfo)
1142 return -ENOMEM; 1143 return -ENOMEM;
1143 1144
1144 if (countersize) 1145 if (countersize)
1145 memset(newinfo->counters, 0, countersize); 1146 memset(newinfo->counters, 0, countersize);
1146 1147
1147 newinfo->entries = vmalloc(tmp.entries_size); 1148 newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
1149 PAGE_KERNEL);
1148 if (!newinfo->entries) { 1150 if (!newinfo->entries) {
1149 ret = -ENOMEM; 1151 ret = -ENOMEM;
1150 goto free_newinfo; 1152 goto free_newinfo;
@@ -2291,9 +2293,12 @@ static int compat_do_replace(struct net *net, void __user *user,
2291 2293
2292 xt_compat_lock(NFPROTO_BRIDGE); 2294 xt_compat_lock(NFPROTO_BRIDGE);
2293 2295
2294 ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); 2296 if (tmp.nentries) {
2295 if (ret < 0) 2297 ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2296 goto out_unlock; 2298 if (ret < 0)
2299 goto out_unlock;
2300 }
2301
2297 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2302 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2298 if (ret < 0) 2303 if (ret < 0)
2299 goto out_unlock; 2304 goto out_unlock;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 08cbed7d940e..419e8edf23ba 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
229 pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) 229 pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
230 return false; 230 return false;
231 231
232 ip6h = ipv6_hdr(skb);
232 thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); 233 thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
233 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) 234 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
234 return false; 235 return false;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 0af8f0db892a..79bb8afa9c0c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -67,6 +67,9 @@
67 */ 67 */
68#define MAX_NFRAMES 256 68#define MAX_NFRAMES 256
69 69
70/* limit timers to 400 days for sending/timeouts */
71#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
72
70/* use of last_frames[index].flags */ 73/* use of last_frames[index].flags */
71#define RX_RECV 0x40 /* received data for this element */ 74#define RX_RECV 0x40 /* received data for this element */
72#define RX_THR 0x80 /* element not been sent due to throttle feature */ 75#define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
140 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); 143 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
141} 144}
142 145
146/* check limitations for timeval provided by user */
147static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
148{
149 if ((msg_head->ival1.tv_sec < 0) ||
150 (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
151 (msg_head->ival1.tv_usec < 0) ||
152 (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
153 (msg_head->ival2.tv_sec < 0) ||
154 (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
155 (msg_head->ival2.tv_usec < 0) ||
156 (msg_head->ival2.tv_usec >= USEC_PER_SEC))
157 return true;
158
159 return false;
160}
161
143#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU) 162#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
144#define OPSIZ sizeof(struct bcm_op) 163#define OPSIZ sizeof(struct bcm_op)
145#define MHSIZ sizeof(struct bcm_msg_head) 164#define MHSIZ sizeof(struct bcm_msg_head)
@@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
873 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) 892 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
874 return -EINVAL; 893 return -EINVAL;
875 894
895 /* check timeval limitations */
896 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
897 return -EINVAL;
898
876 /* check the given can_id */ 899 /* check the given can_id */
877 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); 900 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
878 if (op) { 901 if (op) {
@@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1053 (!(msg_head->can_id & CAN_RTR_FLAG)))) 1076 (!(msg_head->can_id & CAN_RTR_FLAG))))
1054 return -EINVAL; 1077 return -EINVAL;
1055 1078
1079 /* check timeval limitations */
1080 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1081 return -EINVAL;
1082
1056 /* check the given can_id */ 1083 /* check the given can_id */
1057 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); 1084 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1058 if (op) { 1085 if (op) {
diff --git a/net/can/gw.c b/net/can/gw.c
index faa3da88a127..53859346dc9a 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
416 while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) 416 while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
417 (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); 417 (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
418 418
419 /* check for checksum updates when the CAN frame has been modified */ 419 /* Has the CAN frame been modified? */
420 if (modidx) { 420 if (modidx) {
421 if (gwj->mod.csumfunc.crc8) 421 /* get available space for the processed CAN frame type */
422 int max_len = nskb->len - offsetof(struct can_frame, data);
423
424 /* dlc may have changed, make sure it fits to the CAN frame */
425 if (cf->can_dlc > max_len)
426 goto out_delete;
427
428 /* check for checksum updates in classic CAN length only */
429 if (gwj->mod.csumfunc.crc8) {
430 if (cf->can_dlc > 8)
431 goto out_delete;
432
422 (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); 433 (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
434 }
435
436 if (gwj->mod.csumfunc.xor) {
437 if (cf->can_dlc > 8)
438 goto out_delete;
423 439
424 if (gwj->mod.csumfunc.xor)
425 (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); 440 (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
441 }
426 } 442 }
427 443
428 /* clear the skb timestamp if not configured the other way */ 444 /* clear the skb timestamp if not configured the other way */
@@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
434 gwj->dropped_frames++; 450 gwj->dropped_frames++;
435 else 451 else
436 gwj->handled_frames++; 452 gwj->handled_frames++;
453
454 return;
455
456 out_delete:
457 /* delete frame due to misconfiguration */
458 gwj->deleted_frames++;
459 kfree_skb(nskb);
460 return;
437} 461}
438 462
439static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) 463static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 87afb9ec4c68..9cab80207ced 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -255,6 +255,7 @@ enum {
255 Opt_nocephx_sign_messages, 255 Opt_nocephx_sign_messages,
256 Opt_tcp_nodelay, 256 Opt_tcp_nodelay,
257 Opt_notcp_nodelay, 257 Opt_notcp_nodelay,
258 Opt_abort_on_full,
258}; 259};
259 260
260static match_table_t opt_tokens = { 261static match_table_t opt_tokens = {
@@ -280,6 +281,7 @@ static match_table_t opt_tokens = {
280 {Opt_nocephx_sign_messages, "nocephx_sign_messages"}, 281 {Opt_nocephx_sign_messages, "nocephx_sign_messages"},
281 {Opt_tcp_nodelay, "tcp_nodelay"}, 282 {Opt_tcp_nodelay, "tcp_nodelay"},
282 {Opt_notcp_nodelay, "notcp_nodelay"}, 283 {Opt_notcp_nodelay, "notcp_nodelay"},
284 {Opt_abort_on_full, "abort_on_full"},
283 {-1, NULL} 285 {-1, NULL}
284}; 286};
285 287
@@ -535,6 +537,10 @@ ceph_parse_options(char *options, const char *dev_name,
535 opt->flags &= ~CEPH_OPT_TCP_NODELAY; 537 opt->flags &= ~CEPH_OPT_TCP_NODELAY;
536 break; 538 break;
537 539
540 case Opt_abort_on_full:
541 opt->flags |= CEPH_OPT_ABORT_ON_FULL;
542 break;
543
538 default: 544 default:
539 BUG_ON(token); 545 BUG_ON(token);
540 } 546 }
@@ -549,7 +555,8 @@ out:
549} 555}
550EXPORT_SYMBOL(ceph_parse_options); 556EXPORT_SYMBOL(ceph_parse_options);
551 557
552int ceph_print_client_options(struct seq_file *m, struct ceph_client *client) 558int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
559 bool show_all)
553{ 560{
554 struct ceph_options *opt = client->options; 561 struct ceph_options *opt = client->options;
555 size_t pos = m->count; 562 size_t pos = m->count;
@@ -574,6 +581,8 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
574 seq_puts(m, "nocephx_sign_messages,"); 581 seq_puts(m, "nocephx_sign_messages,");
575 if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0) 582 if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
576 seq_puts(m, "notcp_nodelay,"); 583 seq_puts(m, "notcp_nodelay,");
584 if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL))
585 seq_puts(m, "abort_on_full,");
577 586
578 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) 587 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
579 seq_printf(m, "mount_timeout=%d,", 588 seq_printf(m, "mount_timeout=%d,",
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 02952605d121..46f65709a6ff 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -375,7 +375,7 @@ static int client_options_show(struct seq_file *s, void *p)
375 struct ceph_client *client = s->private; 375 struct ceph_client *client = s->private;
376 int ret; 376 int ret;
377 377
378 ret = ceph_print_client_options(s, client); 378 ret = ceph_print_client_options(s, client, true);
379 if (ret) 379 if (ret)
380 return ret; 380 return ret;
381 381
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index d5718284db57..7e71b0df1fbc 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2058,6 +2058,8 @@ static int process_connect(struct ceph_connection *con)
2058 dout("process_connect on %p tag %d\n", con, (int)con->in_tag); 2058 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2059 2059
2060 if (con->auth) { 2060 if (con->auth) {
2061 int len = le32_to_cpu(con->in_reply.authorizer_len);
2062
2061 /* 2063 /*
2062 * Any connection that defines ->get_authorizer() 2064 * Any connection that defines ->get_authorizer()
2063 * should also define ->add_authorizer_challenge() and 2065 * should also define ->add_authorizer_challenge() and
@@ -2067,8 +2069,7 @@ static int process_connect(struct ceph_connection *con)
2067 */ 2069 */
2068 if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) { 2070 if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
2069 ret = con->ops->add_authorizer_challenge( 2071 ret = con->ops->add_authorizer_challenge(
2070 con, con->auth->authorizer_reply_buf, 2072 con, con->auth->authorizer_reply_buf, len);
2071 le32_to_cpu(con->in_reply.authorizer_len));
2072 if (ret < 0) 2073 if (ret < 0)
2073 return ret; 2074 return ret;
2074 2075
@@ -2078,10 +2079,12 @@ static int process_connect(struct ceph_connection *con)
2078 return 0; 2079 return 0;
2079 } 2080 }
2080 2081
2081 ret = con->ops->verify_authorizer_reply(con); 2082 if (len) {
2082 if (ret < 0) { 2083 ret = con->ops->verify_authorizer_reply(con);
2083 con->error_msg = "bad authorize reply"; 2084 if (ret < 0) {
2084 return ret; 2085 con->error_msg = "bad authorize reply";
2086 return ret;
2087 }
2085 } 2088 }
2086 } 2089 }
2087 2090
@@ -3206,9 +3209,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
3206 dout("con_keepalive %p\n", con); 3209 dout("con_keepalive %p\n", con);
3207 mutex_lock(&con->mutex); 3210 mutex_lock(&con->mutex);
3208 clear_standby(con); 3211 clear_standby(con);
3212 con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
3209 mutex_unlock(&con->mutex); 3213 mutex_unlock(&con->mutex);
3210 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && 3214
3211 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3215 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3212 queue_con(con); 3216 queue_con(con);
3213} 3217}
3214EXPORT_SYMBOL(ceph_con_keepalive); 3218EXPORT_SYMBOL(ceph_con_keepalive);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d23a9f81f3d7..fa9530dd876e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2315,7 +2315,7 @@ again:
2315 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2315 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2316 pool_full(osdc, req->r_t.base_oloc.pool))) { 2316 pool_full(osdc, req->r_t.base_oloc.pool))) {
2317 dout("req %p full/pool_full\n", req); 2317 dout("req %p full/pool_full\n", req);
2318 if (osdc->abort_on_full) { 2318 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2319 err = -ENOSPC; 2319 err = -ENOSPC;
2320 } else { 2320 } else {
2321 pr_warn_ratelimited("FULL or reached pool quota\n"); 2321 pr_warn_ratelimited("FULL or reached pool quota\n");
@@ -2545,7 +2545,7 @@ static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2545{ 2545{
2546 bool victims = false; 2546 bool victims = false;
2547 2547
2548 if (osdc->abort_on_full && 2548 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2549 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) 2549 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2550 for_each_request(osdc, abort_on_full_fn, &victims); 2550 for_each_request(osdc, abort_on_full_fn, &victims);
2551} 2551}
diff --git a/net/compat.c b/net/compat.c
index 959d1c51826d..3d348198004f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -388,8 +388,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname,
388 char __user *optval, unsigned int optlen) 388 char __user *optval, unsigned int optlen)
389{ 389{
390 int err; 390 int err;
391 struct socket *sock = sockfd_lookup(fd, &err); 391 struct socket *sock;
392
393 if (optlen > INT_MAX)
394 return -EINVAL;
392 395
396 sock = sockfd_lookup(fd, &err);
393 if (sock) { 397 if (sock) {
394 err = security_socket_setsockopt(sock, level, optname); 398 err = security_socket_setsockopt(sock, level, optname);
395 if (err) { 399 if (err) {
diff --git a/net/core/dev.c b/net/core/dev.c
index 82f20022259d..5d03889502eb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
8152 netdev_features_t feature; 8152 netdev_features_t feature;
8153 int feature_bit; 8153 int feature_bit;
8154 8154
8155 for_each_netdev_feature(&upper_disables, feature_bit) { 8155 for_each_netdev_feature(upper_disables, feature_bit) {
8156 feature = __NETIF_F_BIT(feature_bit); 8156 feature = __NETIF_F_BIT(feature_bit);
8157 if (!(upper->wanted_features & feature) 8157 if (!(upper->wanted_features & feature)
8158 && (features & feature)) { 8158 && (features & feature)) {
@@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
8172 netdev_features_t feature; 8172 netdev_features_t feature;
8173 int feature_bit; 8173 int feature_bit;
8174 8174
8175 for_each_netdev_feature(&upper_disables, feature_bit) { 8175 for_each_netdev_feature(upper_disables, feature_bit) {
8176 feature = __NETIF_F_BIT(feature_bit); 8176 feature = __NETIF_F_BIT(feature_bit);
8177 if (!(features & feature) && (lower->features & feature)) { 8177 if (!(features & feature) && (lower->features & feature)) {
8178 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 8178 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
@@ -8712,6 +8712,9 @@ int init_dummy_netdev(struct net_device *dev)
8712 set_bit(__LINK_STATE_PRESENT, &dev->state); 8712 set_bit(__LINK_STATE_PRESENT, &dev->state);
8713 set_bit(__LINK_STATE_START, &dev->state); 8713 set_bit(__LINK_STATE_START, &dev->state);
8714 8714
8715 /* napi_busy_loop stats accounting wants this */
8716 dev_net_set(dev, &init_net);
8717
8715 /* Note : We dont allocate pcpu_refcnt for dummy devices, 8718 /* Note : We dont allocate pcpu_refcnt for dummy devices,
8716 * because users of this 'device' dont need to change 8719 * because users of this 'device' dont need to change
8717 * its refcount. 8720 * its refcount.
diff --git a/net/core/filter.c b/net/core/filter.c
index 447dd1bad31f..f7d0004fc160 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
2020static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, 2020static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
2021 u32 flags) 2021 u32 flags)
2022{ 2022{
2023 /* skb->mac_len is not set on normal egress */ 2023 unsigned int mlen = skb_network_offset(skb);
2024 unsigned int mlen = skb->network_header - skb->mac_header;
2025 2024
2026 __skb_pull(skb, mlen); 2025 if (mlen) {
2026 __skb_pull(skb, mlen);
2027 2027
2028 /* At ingress, the mac header has already been pulled once. 2028 /* At ingress, the mac header has already been pulled once.
2029 * At egress, skb_pospull_rcsum has to be done in case that 2029 * At egress, skb_pospull_rcsum has to be done in case that
2030 * the skb is originated from ingress (i.e. a forwarded skb) 2030 * the skb is originated from ingress (i.e. a forwarded skb)
2031 * to ensure that rcsum starts at net header. 2031 * to ensure that rcsum starts at net header.
2032 */ 2032 */
2033 if (!skb_at_tc_ingress(skb)) 2033 if (!skb_at_tc_ingress(skb))
2034 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); 2034 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2035 }
2035 skb_pop_mac_header(skb); 2036 skb_pop_mac_header(skb);
2036 skb_reset_mac_len(skb); 2037 skb_reset_mac_len(skb);
2037 return flags & BPF_F_INGRESS ? 2038 return flags & BPF_F_INGRESS ?
@@ -2788,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2788 u32 off = skb_mac_header_len(skb); 2789 u32 off = skb_mac_header_len(skb);
2789 int ret; 2790 int ret;
2790 2791
2791 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2792 if (!skb_is_gso_tcp(skb))
2792 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2793 return -ENOTSUPP; 2793 return -ENOTSUPP;
2794 2794
2795 ret = skb_cow(skb, len_diff); 2795 ret = skb_cow(skb, len_diff);
@@ -2830,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2830 u32 off = skb_mac_header_len(skb); 2830 u32 off = skb_mac_header_len(skb);
2831 int ret; 2831 int ret;
2832 2832
2833 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2833 if (!skb_is_gso_tcp(skb))
2834 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2835 return -ENOTSUPP; 2834 return -ENOTSUPP;
2836 2835
2837 ret = skb_unclone(skb, GFP_ATOMIC); 2836 ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2956,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2956 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2955 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2957 int ret; 2956 int ret;
2958 2957
2959 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2958 if (!skb_is_gso_tcp(skb))
2960 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2961 return -ENOTSUPP; 2959 return -ENOTSUPP;
2962 2960
2963 ret = skb_cow(skb, len_diff); 2961 ret = skb_cow(skb, len_diff);
@@ -2986,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2986 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2984 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2987 int ret; 2985 int ret;
2988 2986
2989 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2987 if (!skb_is_gso_tcp(skb))
2990 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2991 return -ENOTSUPP; 2988 return -ENOTSUPP;
2992 2989
2993 ret = skb_unclone(skb, GFP_ATOMIC); 2990 ret = skb_unclone(skb, GFP_ATOMIC);
@@ -4111,14 +4108,20 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4111 /* Only some socketops are supported */ 4108 /* Only some socketops are supported */
4112 switch (optname) { 4109 switch (optname) {
4113 case SO_RCVBUF: 4110 case SO_RCVBUF:
4111 val = min_t(u32, val, sysctl_rmem_max);
4114 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 4112 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4115 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); 4113 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
4116 break; 4114 break;
4117 case SO_SNDBUF: 4115 case SO_SNDBUF:
4116 val = min_t(u32, val, sysctl_wmem_max);
4118 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 4117 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4119 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); 4118 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4120 break; 4119 break;
4121 case SO_MAX_PACING_RATE: /* 32bit version */ 4120 case SO_MAX_PACING_RATE: /* 32bit version */
4121 if (val != ~0U)
4122 cmpxchg(&sk->sk_pacing_status,
4123 SK_PACING_NONE,
4124 SK_PACING_NEEDED);
4122 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; 4125 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
4123 sk->sk_pacing_rate = min(sk->sk_pacing_rate, 4126 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4124 sk->sk_max_pacing_rate); 4127 sk->sk_max_pacing_rate);
@@ -4132,7 +4135,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4132 sk->sk_rcvlowat = val ? : 1; 4135 sk->sk_rcvlowat = val ? : 1;
4133 break; 4136 break;
4134 case SO_MARK: 4137 case SO_MARK:
4135 sk->sk_mark = val; 4138 if (sk->sk_mark != val) {
4139 sk->sk_mark = val;
4140 sk_dst_reset(sk);
4141 }
4136 break; 4142 break;
4137 default: 4143 default:
4138 ret = -EINVAL; 4144 ret = -EINVAL;
@@ -4203,7 +4209,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4203 /* Only some options are supported */ 4209 /* Only some options are supported */
4204 switch (optname) { 4210 switch (optname) {
4205 case TCP_BPF_IW: 4211 case TCP_BPF_IW:
4206 if (val <= 0 || tp->data_segs_out > 0) 4212 if (val <= 0 || tp->data_segs_out > tp->syn_data)
4207 ret = -EINVAL; 4213 ret = -EINVAL;
4208 else 4214 else
4209 tp->snd_cwnd = val; 4215 tp->snd_cwnd = val;
@@ -5309,7 +5315,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
5309 case BPF_FUNC_trace_printk: 5315 case BPF_FUNC_trace_printk:
5310 if (capable(CAP_SYS_ADMIN)) 5316 if (capable(CAP_SYS_ADMIN))
5311 return bpf_get_trace_printk_proto(); 5317 return bpf_get_trace_printk_proto();
5312 /* else: fall through */ 5318 /* else, fall through */
5313 default: 5319 default:
5314 return NULL; 5320 return NULL;
5315 } 5321 }
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3e85437f7106..a648568c5e8f 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
63 lwt->name ? : "<unknown>"); 63 lwt->name ? : "<unknown>");
64 ret = BPF_OK; 64 ret = BPF_OK;
65 } else { 65 } else {
66 skb_reset_mac_header(skb);
66 ret = skb_do_redirect(skb); 67 ret = skb_do_redirect(skb);
67 if (ret == 0) 68 if (ret == 0)
68 ret = BPF_REDIRECT; 69 ret = BPF_REDIRECT;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 763a7b08df67..4230400b9a30 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -18,6 +18,7 @@
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 19
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/kmemleak.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/module.h> 24#include <linux/module.h>
@@ -443,12 +444,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
443 ret = kmalloc(sizeof(*ret), GFP_ATOMIC); 444 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
444 if (!ret) 445 if (!ret)
445 return NULL; 446 return NULL;
446 if (size <= PAGE_SIZE) 447 if (size <= PAGE_SIZE) {
447 buckets = kzalloc(size, GFP_ATOMIC); 448 buckets = kzalloc(size, GFP_ATOMIC);
448 else 449 } else {
449 buckets = (struct neighbour __rcu **) 450 buckets = (struct neighbour __rcu **)
450 __get_free_pages(GFP_ATOMIC | __GFP_ZERO, 451 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
451 get_order(size)); 452 get_order(size));
453 kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
454 }
452 if (!buckets) { 455 if (!buckets) {
453 kfree(ret); 456 kfree(ret);
454 return NULL; 457 return NULL;
@@ -468,10 +471,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
468 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); 471 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
469 struct neighbour __rcu **buckets = nht->hash_buckets; 472 struct neighbour __rcu **buckets = nht->hash_buckets;
470 473
471 if (size <= PAGE_SIZE) 474 if (size <= PAGE_SIZE) {
472 kfree(buckets); 475 kfree(buckets);
473 else 476 } else {
477 kmemleak_free(buckets);
474 free_pages((unsigned long)buckets, get_order(size)); 478 free_pages((unsigned long)buckets, get_order(size));
479 }
475 kfree(nht); 480 kfree(nht);
476} 481}
477 482
@@ -1002,7 +1007,7 @@ static void neigh_probe(struct neighbour *neigh)
1002 if (neigh->ops->solicit) 1007 if (neigh->ops->solicit)
1003 neigh->ops->solicit(neigh, skb); 1008 neigh->ops->solicit(neigh, skb);
1004 atomic_inc(&neigh->probes); 1009 atomic_inc(&neigh->probes);
1005 kfree_skb(skb); 1010 consume_skb(skb);
1006} 1011}
1007 1012
1008/* Called when a timer expires for a neighbour entry. */ 1013/* Called when a timer expires for a neighbour entry. */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 37317ffec146..2415d9cb9b89 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
356 */ 356 */
357void *netdev_alloc_frag(unsigned int fragsz) 357void *netdev_alloc_frag(unsigned int fragsz)
358{ 358{
359 fragsz = SKB_DATA_ALIGN(fragsz);
360
359 return __netdev_alloc_frag(fragsz, GFP_ATOMIC); 361 return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
360} 362}
361EXPORT_SYMBOL(netdev_alloc_frag); 363EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
369 371
370void *napi_alloc_frag(unsigned int fragsz) 372void *napi_alloc_frag(unsigned int fragsz)
371{ 373{
374 fragsz = SKB_DATA_ALIGN(fragsz);
375
372 return __napi_alloc_frag(fragsz, GFP_ATOMIC); 376 return __napi_alloc_frag(fragsz, GFP_ATOMIC);
373} 377}
374EXPORT_SYMBOL(napi_alloc_frag); 378EXPORT_SYMBOL(napi_alloc_frag);
@@ -5270,7 +5274,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5270 unsigned long chunk; 5274 unsigned long chunk;
5271 struct sk_buff *skb; 5275 struct sk_buff *skb;
5272 struct page *page; 5276 struct page *page;
5273 gfp_t gfp_head;
5274 int i; 5277 int i;
5275 5278
5276 *errcode = -EMSGSIZE; 5279 *errcode = -EMSGSIZE;
@@ -5280,12 +5283,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5280 if (npages > MAX_SKB_FRAGS) 5283 if (npages > MAX_SKB_FRAGS)
5281 return NULL; 5284 return NULL;
5282 5285
5283 gfp_head = gfp_mask;
5284 if (gfp_head & __GFP_DIRECT_RECLAIM)
5285 gfp_head |= __GFP_RETRY_MAYFAIL;
5286
5287 *errcode = -ENOBUFS; 5286 *errcode = -ENOBUFS;
5288 skb = alloc_skb(header_len, gfp_head); 5287 skb = alloc_skb(header_len, gfp_mask);
5289 if (!skb) 5288 if (!skb)
5290 return NULL; 5289 return NULL;
5291 5290
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index d6d5c20d7044..8c826603bf36 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
545 struct sk_psock *psock = container_of(gc, struct sk_psock, gc); 545 struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
546 546
547 /* No sk_callback_lock since already detached. */ 547 /* No sk_callback_lock since already detached. */
548 if (psock->parser.enabled) 548 strp_done(&psock->parser.strp);
549 strp_done(&psock->parser.strp);
550 549
551 cancel_work_sync(&psock->work); 550 cancel_work_sync(&psock->work);
552 551
diff --git a/net/core/sock.c b/net/core/sock.c
index 6aa2e7e0b4fb..bc3512f230a3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2380,7 +2380,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2380 } 2380 }
2381 2381
2382 if (sk_has_memory_pressure(sk)) { 2382 if (sk_has_memory_pressure(sk)) {
2383 int alloc; 2383 u64 alloc;
2384 2384
2385 if (!sk_under_memory_pressure(sk)) 2385 if (!sk_under_memory_pressure(sk))
2386 return 1; 2386 return 1;
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6eb837a47b5c..baaaeb2b2c42 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
202static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, 202static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
203 u8 pkt, u8 opt, u8 *val, u8 len) 203 u8 pkt, u8 opt, u8 *val, u8 len)
204{ 204{
205 if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL) 205 if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
206 return 0; 206 return 0;
207 return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); 207 return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
208} 208}
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
214static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, 214static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
215 u8 pkt, u8 opt, u8 *val, u8 len) 215 u8 pkt, u8 opt, u8 *val, u8 len)
216{ 216{
217 if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL) 217 if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
218 return 0; 218 return 0;
219 return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); 219 return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
220} 220}
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index d0b3e69c6b39..0962f9201baa 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -56,7 +56,7 @@
56#include <net/dn_neigh.h> 56#include <net/dn_neigh.h>
57#include <net/dn_fib.h> 57#include <net/dn_fib.h>
58 58
59#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn)) 59#define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn))
60 60
61static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; 61static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
62static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00}; 62static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 71bb15f491c8..54f5551fb799 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -205,6 +205,8 @@ static void dsa_master_reset_mtu(struct net_device *dev)
205 rtnl_unlock(); 205 rtnl_unlock();
206} 206}
207 207
208static struct lock_class_key dsa_master_addr_list_lock_key;
209
208int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) 210int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
209{ 211{
210 int ret; 212 int ret;
@@ -218,6 +220,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
218 wmb(); 220 wmb();
219 221
220 dev->dsa_ptr = cpu_dp; 222 dev->dsa_ptr = cpu_dp;
223 lockdep_set_class(&dev->addr_list_lock,
224 &dsa_master_addr_list_lock_key);
221 225
222 ret = dsa_master_ethtool_setup(dev); 226 ret = dsa_master_ethtool_setup(dev);
223 if (ret) 227 if (ret)
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 2d7e01b23572..2a2a878b5ce3 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
69 69
70int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) 70int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
71{ 71{
72 u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING;
73 struct dsa_switch *ds = dp->ds; 72 struct dsa_switch *ds = dp->ds;
74 int port = dp->index; 73 int port = dp->index;
75 int err; 74 int err;
@@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
80 return err; 79 return err;
81 } 80 }
82 81
83 dsa_port_set_state_now(dp, stp_state); 82 if (!dp->bridge_dev)
83 dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
84 84
85 return 0; 85 return 0;
86} 86}
@@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy)
90 struct dsa_switch *ds = dp->ds; 90 struct dsa_switch *ds = dp->ds;
91 int port = dp->index; 91 int port = dp->index;
92 92
93 dsa_port_set_state_now(dp, BR_STATE_DISABLED); 93 if (!dp->bridge_dev)
94 dsa_port_set_state_now(dp, BR_STATE_DISABLED);
94 95
95 if (ds->ops->port_disable) 96 if (ds->ops->port_disable)
96 ds->ops->port_disable(ds, port, phy); 97 ds->ops->port_disable(ds, port, phy);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3fcc1d01615..a1c9fe155057 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev)
140static void dsa_slave_change_rx_flags(struct net_device *dev, int change) 140static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
141{ 141{
142 struct net_device *master = dsa_slave_to_master(dev); 142 struct net_device *master = dsa_slave_to_master(dev);
143 143 if (dev->flags & IFF_UP) {
144 if (change & IFF_ALLMULTI) 144 if (change & IFF_ALLMULTI)
145 dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1); 145 dev_set_allmulti(master,
146 if (change & IFF_PROMISC) 146 dev->flags & IFF_ALLMULTI ? 1 : -1);
147 dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1); 147 if (change & IFF_PROMISC)
148 dev_set_promiscuity(master,
149 dev->flags & IFF_PROMISC ? 1 : -1);
150 }
148} 151}
149 152
150static void dsa_slave_set_rx_mode(struct net_device *dev) 153static void dsa_slave_set_rx_mode(struct net_device *dev)
@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
639 int ret; 642 int ret;
640 643
641 /* Port's PHY and MAC both need to be EEE capable */ 644 /* Port's PHY and MAC both need to be EEE capable */
642 if (!dev->phydev && !dp->pl) 645 if (!dev->phydev || !dp->pl)
643 return -ENODEV; 646 return -ENODEV;
644 647
645 if (!ds->ops->set_mac_eee) 648 if (!ds->ops->set_mac_eee)
@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
659 int ret; 662 int ret;
660 663
661 /* Port's PHY and MAC both need to be EEE capable */ 664 /* Port's PHY and MAC both need to be EEE capable */
662 if (!dev->phydev && !dp->pl) 665 if (!dev->phydev || !dp->pl)
663 return -ENODEV; 666 return -ENODEV;
664 667
665 if (!ds->ops->get_mac_eee) 668 if (!ds->ops->get_mac_eee)
diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c
index 5e04ed25bc0e..1e976bb93d99 100644
--- a/net/ipv4/bpfilter/sockopt.c
+++ b/net/ipv4/bpfilter/sockopt.c
@@ -1,28 +1,54 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/init.h>
3#include <linux/module.h>
2#include <linux/uaccess.h> 4#include <linux/uaccess.h>
3#include <linux/bpfilter.h> 5#include <linux/bpfilter.h>
4#include <uapi/linux/bpf.h> 6#include <uapi/linux/bpf.h>
5#include <linux/wait.h> 7#include <linux/wait.h>
6#include <linux/kmod.h> 8#include <linux/kmod.h>
9#include <linux/fs.h>
10#include <linux/file.h>
7 11
8int (*bpfilter_process_sockopt)(struct sock *sk, int optname, 12struct bpfilter_umh_ops bpfilter_ops;
9 char __user *optval, 13EXPORT_SYMBOL_GPL(bpfilter_ops);
10 unsigned int optlen, bool is_set); 14
11EXPORT_SYMBOL_GPL(bpfilter_process_sockopt); 15static void bpfilter_umh_cleanup(struct umh_info *info)
16{
17 mutex_lock(&bpfilter_ops.lock);
18 bpfilter_ops.stop = true;
19 fput(info->pipe_to_umh);
20 fput(info->pipe_from_umh);
21 info->pid = 0;
22 mutex_unlock(&bpfilter_ops.lock);
23}
12 24
13static int bpfilter_mbox_request(struct sock *sk, int optname, 25static int bpfilter_mbox_request(struct sock *sk, int optname,
14 char __user *optval, 26 char __user *optval,
15 unsigned int optlen, bool is_set) 27 unsigned int optlen, bool is_set)
16{ 28{
17 if (!bpfilter_process_sockopt) { 29 int err;
18 int err = request_module("bpfilter"); 30 mutex_lock(&bpfilter_ops.lock);
31 if (!bpfilter_ops.sockopt) {
32 mutex_unlock(&bpfilter_ops.lock);
33 err = request_module("bpfilter");
34 mutex_lock(&bpfilter_ops.lock);
19 35
20 if (err) 36 if (err)
21 return err; 37 goto out;
22 if (!bpfilter_process_sockopt) 38 if (!bpfilter_ops.sockopt) {
23 return -ECHILD; 39 err = -ECHILD;
40 goto out;
41 }
42 }
43 if (bpfilter_ops.stop) {
44 err = bpfilter_ops.start();
45 if (err)
46 goto out;
24 } 47 }
25 return bpfilter_process_sockopt(sk, optname, optval, optlen, is_set); 48 err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set);
49out:
50 mutex_unlock(&bpfilter_ops.lock);
51 return err;
26} 52}
27 53
28int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, 54int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
@@ -41,3 +67,15 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
41 67
42 return bpfilter_mbox_request(sk, optname, optval, len, false); 68 return bpfilter_mbox_request(sk, optname, optval, len, false);
43} 69}
70
71static int __init bpfilter_sockopt_init(void)
72{
73 mutex_init(&bpfilter_ops.lock);
74 bpfilter_ops.stop = true;
75 bpfilter_ops.info.cmdline = "bpfilter_umh";
76 bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup;
77
78 return 0;
79}
80
81module_init(bpfilter_sockopt_init);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 04ba321ae5ce..e258a00b4a3d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1826,7 +1826,7 @@ put_tgt_net:
1826 if (fillargs.netnsid >= 0) 1826 if (fillargs.netnsid >= 0)
1827 put_net(tgt_net); 1827 put_net(tgt_net);
1828 1828
1829 return err < 0 ? err : skb->len; 1829 return skb->len ? : err;
1830} 1830}
1831 1831
1832static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, 1832static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 5459f41fc26f..10e809b296ec 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
328 skb->len += tailen; 328 skb->len += tailen;
329 skb->data_len += tailen; 329 skb->data_len += tailen;
330 skb->truesize += tailen; 330 skb->truesize += tailen;
331 if (sk) 331 if (sk && sk_fullsock(sk))
332 refcount_add(tailen, &sk->sk_wmem_alloc); 332 refcount_add(tailen, &sk->sk_wmem_alloc);
333 333
334 goto out; 334 goto out;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6df95be96311..fe4f6a624238 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
203 struct fib_table *tb; 203 struct fib_table *tb;
204 204
205 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) 205 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
206 flushed += fib_table_flush(net, tb); 206 flushed += fib_table_flush(net, tb, false);
207 } 207 }
208 208
209 if (flushed) 209 if (flushed)
@@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net)
1463 1463
1464 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { 1464 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
1465 hlist_del(&tb->tb_hlist); 1465 hlist_del(&tb->tb_hlist);
1466 fib_table_flush(net, tb); 1466 fib_table_flush(net, tb, true);
1467 fib_free_table(tb); 1467 fib_free_table(tb);
1468 } 1468 }
1469 } 1469 }
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 237c9f72b265..a573e37e0615 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
1856} 1856}
1857 1857
1858/* Caller must hold RTNL. */ 1858/* Caller must hold RTNL. */
1859int fib_table_flush(struct net *net, struct fib_table *tb) 1859int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
1860{ 1860{
1861 struct trie *t = (struct trie *)tb->tb_data; 1861 struct trie *t = (struct trie *)tb->tb_data;
1862 struct key_vector *pn = t->kv; 1862 struct key_vector *pn = t->kv;
@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
1904 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { 1904 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
1905 struct fib_info *fi = fa->fa_info; 1905 struct fib_info *fi = fa->fa_info;
1906 1906
1907 if (!fi || !(fi->fib_flags & RTNH_F_DEAD) || 1907 if (!fi || tb->tb_id != fa->tb_id ||
1908 tb->tb_id != fa->tb_id) { 1908 (!(fi->fib_flags & RTNH_F_DEAD) &&
1909 !fib_props[fa->fa_type].error)) {
1910 slen = fa->fa_slen;
1911 continue;
1912 }
1913
1914 /* Do not flush error routes if network namespace is
1915 * not being dismantled
1916 */
1917 if (!flush_all && fib_props[fa->fa_type].error) {
1909 slen = fa->fa_slen; 1918 slen = fa->fa_slen;
1910 continue; 1919 continue;
1911 } 1920 }
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 0c9f171fb085..437070d1ffb1 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -1020,10 +1020,11 @@ static int gue_err(struct sk_buff *skb, u32 info)
1020{ 1020{
1021 int transport_offset = skb_transport_offset(skb); 1021 int transport_offset = skb_transport_offset(skb);
1022 struct guehdr *guehdr; 1022 struct guehdr *guehdr;
1023 size_t optlen; 1023 size_t len, optlen;
1024 int ret; 1024 int ret;
1025 1025
1026 if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) 1026 len = sizeof(struct udphdr) + sizeof(struct guehdr);
1027 if (!pskb_may_pull(skb, len))
1027 return -EINVAL; 1028 return -EINVAL;
1028 1029
1029 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; 1030 guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -1058,6 +1059,10 @@ static int gue_err(struct sk_buff *skb, u32 info)
1058 1059
1059 optlen = guehdr->hlen << 2; 1060 optlen = guehdr->hlen << 2;
1060 1061
1062 if (!pskb_may_pull(skb, len + optlen))
1063 return -EINVAL;
1064
1065 guehdr = (struct guehdr *)&udp_hdr(skb)[1];
1061 if (validate_gue_flags(guehdr, optlen)) 1066 if (validate_gue_flags(guehdr, optlen))
1062 return -EINVAL; 1067 return -EINVAL;
1063 1068
@@ -1065,7 +1070,8 @@ static int gue_err(struct sk_buff *skb, u32 info)
1065 * recursion. Besides, this kind of encapsulation can't even be 1070 * recursion. Besides, this kind of encapsulation can't even be
1066 * configured currently. Discard this. 1071 * configured currently. Discard this.
1067 */ 1072 */
1068 if (guehdr->proto_ctype == IPPROTO_UDP) 1073 if (guehdr->proto_ctype == IPPROTO_UDP ||
1074 guehdr->proto_ctype == IPPROTO_UDPLITE)
1069 return -EOPNOTSUPP; 1075 return -EOPNOTSUPP;
1070 1076
1071 skb_set_transport_header(skb, -(int)sizeof(struct icmphdr)); 1077 skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index a4bf22ee3aed..7c4a41dc04bb 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -25,6 +25,7 @@
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <net/protocol.h> 26#include <net/protocol.h>
27#include <net/gre.h> 27#include <net/gre.h>
28#include <net/erspan.h>
28 29
29#include <net/icmp.h> 30#include <net/icmp.h>
30#include <net/route.h> 31#include <net/route.h>
@@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
119 hdr_len += 4; 120 hdr_len += 4;
120 } 121 }
121 tpi->hdr_len = hdr_len; 122 tpi->hdr_len = hdr_len;
123
124 /* ERSPAN ver 1 and 2 protocol sets GRE key field
125 * to 0 and sets the configured key in the
126 * inner erspan header field
127 */
128 if (greh->protocol == htons(ETH_P_ERSPAN) ||
129 greh->protocol == htons(ETH_P_ERSPAN2)) {
130 struct erspan_base_hdr *ershdr;
131
132 if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
133 return -EINVAL;
134
135 ershdr = (struct erspan_base_hdr *)options;
136 tpi->key = cpu_to_be32(get_session_id(ershdr));
137 }
138
122 return hdr_len; 139 return hdr_len;
123} 140}
124EXPORT_SYMBOL(gre_parse_header); 141EXPORT_SYMBOL(gre_parse_header);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 1a4e9ff02762..5731670c560b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
108 + nla_total_size(1) /* INET_DIAG_TOS */ 108 + nla_total_size(1) /* INET_DIAG_TOS */
109 + nla_total_size(1) /* INET_DIAG_TCLASS */ 109 + nla_total_size(1) /* INET_DIAG_TCLASS */
110 + nla_total_size(4) /* INET_DIAG_MARK */ 110 + nla_total_size(4) /* INET_DIAG_MARK */
111 + nla_total_size(4) /* INET_DIAG_CLASS_ID */
111 + nla_total_size(sizeof(struct inet_diag_meminfo)) 112 + nla_total_size(sizeof(struct inet_diag_meminfo))
112 + nla_total_size(sizeof(struct inet_diag_msg)) 113 + nla_total_size(sizeof(struct inet_diag_msg))
113 + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) 114 + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
287 goto errout; 288 goto errout;
288 } 289 }
289 290
290 if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) { 291 if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
292 ext & (1 << (INET_DIAG_TCLASS - 1))) {
291 u32 classid = 0; 293 u32 classid = 0;
292 294
293#ifdef CONFIG_SOCK_CGROUP_DATA 295#ifdef CONFIG_SOCK_CGROUP_DATA
294 classid = sock_cgroup_classid(&sk->sk_cgrp_data); 296 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
295#endif 297#endif
298 /* Fallback to socket priority if class id isn't set.
299 * Classful qdiscs use it as direct reference to class.
300 * For cgroup2 classid is always zero.
301 */
302 if (!classid)
303 classid = sk->sk_priority;
296 304
297 if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) 305 if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
298 goto errout; 306 goto errout;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d757b9642d0d..be778599bfed 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
216 atomic_set(&p->rid, 0); 216 atomic_set(&p->rid, 0);
217 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; 217 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
218 p->rate_tokens = 0; 218 p->rate_tokens = 0;
219 p->n_redirects = 0;
219 /* 60*HZ is arbitrary, but chosen enough high so that the first 220 /* 60*HZ is arbitrary, but chosen enough high so that the first
220 * calculation of tokens is at its maximum. 221 * calculation of tokens is at its maximum.
221 */ 222 */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d1d09f3e5f9e..6ae89f2b541b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -268,20 +268,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
268 int len; 268 int len;
269 269
270 itn = net_generic(net, erspan_net_id); 270 itn = net_generic(net, erspan_net_id);
271 len = gre_hdr_len + sizeof(*ershdr);
272
273 /* Check based hdr len */
274 if (unlikely(!pskb_may_pull(skb, len)))
275 return PACKET_REJECT;
276 271
277 iph = ip_hdr(skb); 272 iph = ip_hdr(skb);
278 ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); 273 ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
279 ver = ershdr->ver; 274 ver = ershdr->ver;
280 275
281 /* The original GRE header does not have key field,
282 * Use ERSPAN 10-bit session ID as key.
283 */
284 tpi->key = cpu_to_be32(get_session_id(ershdr));
285 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, 276 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
286 tpi->flags | TUNNEL_KEY, 277 tpi->flags | TUNNEL_KEY,
287 iph->saddr, iph->daddr, tpi->key); 278 iph->saddr, iph->daddr, tpi->key);
@@ -569,8 +560,7 @@ err_free_skb:
569 dev->stats.tx_dropped++; 560 dev->stats.tx_dropped++;
570} 561}
571 562
572static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, 563static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
573 __be16 proto)
574{ 564{
575 struct ip_tunnel *tunnel = netdev_priv(dev); 565 struct ip_tunnel *tunnel = netdev_priv(dev);
576 struct ip_tunnel_info *tun_info; 566 struct ip_tunnel_info *tun_info;
@@ -578,10 +568,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
578 struct erspan_metadata *md; 568 struct erspan_metadata *md;
579 struct rtable *rt = NULL; 569 struct rtable *rt = NULL;
580 bool truncate = false; 570 bool truncate = false;
571 __be16 df, proto;
581 struct flowi4 fl; 572 struct flowi4 fl;
582 int tunnel_hlen; 573 int tunnel_hlen;
583 int version; 574 int version;
584 __be16 df;
585 int nhoff; 575 int nhoff;
586 int thoff; 576 int thoff;
587 577
@@ -626,18 +616,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
626 if (version == 1) { 616 if (version == 1) {
627 erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), 617 erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
628 ntohl(md->u.index), truncate, true); 618 ntohl(md->u.index), truncate, true);
619 proto = htons(ETH_P_ERSPAN);
629 } else if (version == 2) { 620 } else if (version == 2) {
630 erspan_build_header_v2(skb, 621 erspan_build_header_v2(skb,
631 ntohl(tunnel_id_to_key32(key->tun_id)), 622 ntohl(tunnel_id_to_key32(key->tun_id)),
632 md->u.md2.dir, 623 md->u.md2.dir,
633 get_hwid(&md->u.md2), 624 get_hwid(&md->u.md2),
634 truncate, true); 625 truncate, true);
626 proto = htons(ETH_P_ERSPAN2);
635 } else { 627 } else {
636 goto err_free_rt; 628 goto err_free_rt;
637 } 629 }
638 630
639 gre_build_header(skb, 8, TUNNEL_SEQ, 631 gre_build_header(skb, 8, TUNNEL_SEQ,
640 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++)); 632 proto, 0, htonl(tunnel->o_seqno++));
641 633
642 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 634 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
643 635
@@ -721,12 +713,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
721{ 713{
722 struct ip_tunnel *tunnel = netdev_priv(dev); 714 struct ip_tunnel *tunnel = netdev_priv(dev);
723 bool truncate = false; 715 bool truncate = false;
716 __be16 proto;
724 717
725 if (!pskb_inet_may_pull(skb)) 718 if (!pskb_inet_may_pull(skb))
726 goto free_skb; 719 goto free_skb;
727 720
728 if (tunnel->collect_md) { 721 if (tunnel->collect_md) {
729 erspan_fb_xmit(skb, dev, skb->protocol); 722 erspan_fb_xmit(skb, dev);
730 return NETDEV_TX_OK; 723 return NETDEV_TX_OK;
731 } 724 }
732 725
@@ -742,19 +735,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
742 } 735 }
743 736
744 /* Push ERSPAN header */ 737 /* Push ERSPAN header */
745 if (tunnel->erspan_ver == 1) 738 if (tunnel->erspan_ver == 1) {
746 erspan_build_header(skb, ntohl(tunnel->parms.o_key), 739 erspan_build_header(skb, ntohl(tunnel->parms.o_key),
747 tunnel->index, 740 tunnel->index,
748 truncate, true); 741 truncate, true);
749 else if (tunnel->erspan_ver == 2) 742 proto = htons(ETH_P_ERSPAN);
743 } else if (tunnel->erspan_ver == 2) {
750 erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), 744 erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
751 tunnel->dir, tunnel->hwid, 745 tunnel->dir, tunnel->hwid,
752 truncate, true); 746 truncate, true);
753 else 747 proto = htons(ETH_P_ERSPAN2);
748 } else {
754 goto free_skb; 749 goto free_skb;
750 }
755 751
756 tunnel->parms.o_flags &= ~TUNNEL_KEY; 752 tunnel->parms.o_flags &= ~TUNNEL_KEY;
757 __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); 753 __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
758 return NETDEV_TX_OK; 754 return NETDEV_TX_OK;
759 755
760free_skb: 756free_skb:
@@ -1459,12 +1455,31 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1459{ 1455{
1460 struct ip_tunnel *t = netdev_priv(dev); 1456 struct ip_tunnel *t = netdev_priv(dev);
1461 struct ip_tunnel_parm *p = &t->parms; 1457 struct ip_tunnel_parm *p = &t->parms;
1458 __be16 o_flags = p->o_flags;
1459
1460 if (t->erspan_ver == 1 || t->erspan_ver == 2) {
1461 if (!t->collect_md)
1462 o_flags |= TUNNEL_KEY;
1463
1464 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1465 goto nla_put_failure;
1466
1467 if (t->erspan_ver == 1) {
1468 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1469 goto nla_put_failure;
1470 } else {
1471 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1472 goto nla_put_failure;
1473 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1474 goto nla_put_failure;
1475 }
1476 }
1462 1477
1463 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 1478 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1464 nla_put_be16(skb, IFLA_GRE_IFLAGS, 1479 nla_put_be16(skb, IFLA_GRE_IFLAGS,
1465 gre_tnl_flags_to_gre_flags(p->i_flags)) || 1480 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1466 nla_put_be16(skb, IFLA_GRE_OFLAGS, 1481 nla_put_be16(skb, IFLA_GRE_OFLAGS,
1467 gre_tnl_flags_to_gre_flags(p->o_flags)) || 1482 gre_tnl_flags_to_gre_flags(o_flags)) ||
1468 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 1483 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1469 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 1484 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1470 nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || 1485 nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
@@ -1494,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1494 goto nla_put_failure; 1509 goto nla_put_failure;
1495 } 1510 }
1496 1511
1497 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1498 goto nla_put_failure;
1499
1500 if (t->erspan_ver == 1) {
1501 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1502 goto nla_put_failure;
1503 } else if (t->erspan_ver == 2) {
1504 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1505 goto nla_put_failure;
1506 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1507 goto nla_put_failure;
1508 }
1509
1510 return 0; 1512 return 0;
1511 1513
1512nla_put_failure: 1514nla_put_failure:
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 26921f6b3b92..51d8efba6de2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -488,6 +488,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
488 goto drop; 488 goto drop;
489 } 489 }
490 490
491 iph = ip_hdr(skb);
491 skb->transport_header = skb->network_header + iph->ihl*4; 492 skb->transport_header = skb->network_header + iph->ihl*4;
492 493
493 /* Remove any debris in the socket control block */ 494 /* Remove any debris in the socket control block */
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fffcc130900e..82f341e84fae 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
148 148
149static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) 149static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
150{ 150{
151 __be16 _ports[2], *ports;
151 struct sockaddr_in sin; 152 struct sockaddr_in sin;
152 __be16 *ports;
153 int end;
154
155 end = skb_transport_offset(skb) + 4;
156 if (end > 0 && !pskb_may_pull(skb, end))
157 return;
158 153
159 /* All current transport protocols have the port numbers in the 154 /* All current transport protocols have the port numbers in the
160 * first four bytes of the transport header and this function is 155 * first four bytes of the transport header and this function is
161 * written with this assumption in mind. 156 * written with this assumption in mind.
162 */ 157 */
163 ports = (__be16 *)skb_transport_header(skb); 158 ports = skb_header_pointer(skb, skb_transport_offset(skb),
159 sizeof(_ports), &_ports);
160 if (!ports)
161 return;
164 162
165 sin.sin_family = AF_INET; 163 sin.sin_family = AF_INET;
166 sin.sin_addr.s_addr = ip_hdr(skb)->daddr; 164 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c4f5602308ed..054d01c16dc6 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -644,13 +644,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
644 dst = tnl_params->daddr; 644 dst = tnl_params->daddr;
645 if (dst == 0) { 645 if (dst == 0) {
646 /* NBMA tunnel */ 646 /* NBMA tunnel */
647 struct ip_tunnel_info *tun_info;
647 648
648 if (!skb_dst(skb)) { 649 if (!skb_dst(skb)) {
649 dev->stats.tx_fifo_errors++; 650 dev->stats.tx_fifo_errors++;
650 goto tx_error; 651 goto tx_error;
651 } 652 }
652 653
653 if (skb->protocol == htons(ETH_P_IP)) { 654 tun_info = skb_tunnel_info(skb);
655 if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
656 ip_tunnel_info_af(tun_info) == AF_INET &&
657 tun_info->key.u.ipv4.dst)
658 dst = tun_info->key.u.ipv4.dst;
659 else if (skb->protocol == htons(ETH_P_IP)) {
654 rt = skb_rtable(skb); 660 rt = skb_rtable(skb);
655 dst = rt_nexthop(rt, inner_iph->daddr); 661 dst = rt_nexthop(rt, inner_iph->daddr);
656 } 662 }
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index d7b43e700023..68a21bf75dd0 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -74,6 +74,33 @@ drop:
74 return 0; 74 return 0;
75} 75}
76 76
77static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
78 int encap_type)
79{
80 struct ip_tunnel *tunnel;
81 const struct iphdr *iph = ip_hdr(skb);
82 struct net *net = dev_net(skb->dev);
83 struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
84
85 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
86 iph->saddr, iph->daddr, 0);
87 if (tunnel) {
88 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
89 goto drop;
90
91 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
92
93 skb->dev = tunnel->dev;
94
95 return xfrm_input(skb, nexthdr, spi, encap_type);
96 }
97
98 return -EINVAL;
99drop:
100 kfree_skb(skb);
101 return 0;
102}
103
77static int vti_rcv(struct sk_buff *skb) 104static int vti_rcv(struct sk_buff *skb)
78{ 105{
79 XFRM_SPI_SKB_CB(skb)->family = AF_INET; 106 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
82 return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); 109 return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
83} 110}
84 111
112static int vti_rcv_ipip(struct sk_buff *skb)
113{
114 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
115 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
116
117 return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
118}
119
85static int vti_rcv_cb(struct sk_buff *skb, int err) 120static int vti_rcv_cb(struct sk_buff *skb, int err)
86{ 121{
87 unsigned short family; 122 unsigned short family;
@@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
435 .priority = 100, 470 .priority = 100,
436}; 471};
437 472
473static struct xfrm_tunnel ipip_handler __read_mostly = {
474 .handler = vti_rcv_ipip,
475 .err_handler = vti4_err,
476 .priority = 0,
477};
478
438static int __net_init vti_init_net(struct net *net) 479static int __net_init vti_init_net(struct net *net)
439{ 480{
440 int err; 481 int err;
@@ -603,6 +644,13 @@ static int __init vti_init(void)
603 if (err < 0) 644 if (err < 0)
604 goto xfrm_proto_comp_failed; 645 goto xfrm_proto_comp_failed;
605 646
647 msg = "ipip tunnel";
648 err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
649 if (err < 0) {
650 pr_info("%s: cant't register tunnel\n",__func__);
651 goto xfrm_tunnel_failed;
652 }
653
606 msg = "netlink interface"; 654 msg = "netlink interface";
607 err = rtnl_link_register(&vti_link_ops); 655 err = rtnl_link_register(&vti_link_ops);
608 if (err < 0) 656 if (err < 0)
@@ -612,6 +660,8 @@ static int __init vti_init(void)
612 660
613rtnl_link_failed: 661rtnl_link_failed:
614 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); 662 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
663xfrm_tunnel_failed:
664 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
615xfrm_proto_comp_failed: 665xfrm_proto_comp_failed:
616 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); 666 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
617xfrm_proto_ah_failed: 667xfrm_proto_ah_failed:
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index b61977db9b7f..2a909e5f9ba0 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -846,9 +846,9 @@ static int clusterip_net_init(struct net *net)
846 846
847static void clusterip_net_exit(struct net *net) 847static void clusterip_net_exit(struct net *net)
848{ 848{
849#ifdef CONFIG_PROC_FS
849 struct clusterip_net *cn = clusterip_pernet(net); 850 struct clusterip_net *cn = clusterip_pernet(net);
850 851
851#ifdef CONFIG_PROC_FS
852 mutex_lock(&cn->mutex); 852 mutex_lock(&cn->mutex);
853 proc_remove(cn->procdir); 853 proc_remove(cn->procdir);
854 cn->procdir = NULL; 854 cn->procdir = NULL;
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 2687db015b6f..fa2ba7c500e4 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -215,6 +215,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
215 215
216 /* Change outer to look like the reply to an incoming packet */ 216 /* Change outer to look like the reply to an incoming packet */
217 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 217 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
218 target.dst.protonum = IPPROTO_ICMP;
218 if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) 219 if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
219 return 0; 220 return 0;
220 221
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
index a0aa13bcabda..0a8a60c1bf9a 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
105int snmp_version(void *context, size_t hdrlen, unsigned char tag, 105int snmp_version(void *context, size_t hdrlen, unsigned char tag,
106 const void *data, size_t datalen) 106 const void *data, size_t datalen)
107{ 107{
108 if (datalen != 1)
109 return -EINVAL;
108 if (*(unsigned char *)data > 1) 110 if (*(unsigned char *)data > 1)
109 return -ENOTSUPP; 111 return -ENOTSUPP;
110 return 1; 112 return 1;
@@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
114 const void *data, size_t datalen) 116 const void *data, size_t datalen)
115{ 117{
116 struct snmp_ctx *ctx = (struct snmp_ctx *)context; 118 struct snmp_ctx *ctx = (struct snmp_ctx *)context;
117 __be32 *pdata = (__be32 *)data; 119 __be32 *pdata;
118 120
121 if (datalen != 4)
122 return -EINVAL;
123 pdata = (__be32 *)data;
119 if (*pdata == ctx->from) { 124 if (*pdata == ctx->from) {
120 pr_debug("%s: %pI4 to %pI4\n", __func__, 125 pr_debug("%s: %pI4 to %pI4\n", __func__,
121 (void *)&ctx->from, (void *)&ctx->to); 126 (void *)&ctx->from, (void *)&ctx->to);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ce92f73cf104..5163b64f8fb3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
887 /* No redirected packets during ip_rt_redirect_silence; 887 /* No redirected packets during ip_rt_redirect_silence;
888 * reset the algorithm. 888 * reset the algorithm.
889 */ 889 */
890 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) 890 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
891 peer->rate_tokens = 0; 891 peer->rate_tokens = 0;
892 peer->n_redirects = 0;
893 }
892 894
893 /* Too many ignored redirects; do not send anything 895 /* Too many ignored redirects; do not send anything
894 * set dst.rate_last to the last seen redirected packet. 896 * set dst.rate_last to the last seen redirected packet.
895 */ 897 */
896 if (peer->rate_tokens >= ip_rt_redirect_number) { 898 if (peer->n_redirects >= ip_rt_redirect_number) {
897 peer->rate_last = jiffies; 899 peer->rate_last = jiffies;
898 goto out_put_peer; 900 goto out_put_peer;
899 } 901 }
@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
910 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); 912 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
911 peer->rate_last = jiffies; 913 peer->rate_last = jiffies;
912 ++peer->rate_tokens; 914 ++peer->rate_tokens;
915 ++peer->n_redirects;
913#ifdef CONFIG_IP_ROUTE_VERBOSE 916#ifdef CONFIG_IP_ROUTE_VERBOSE
914 if (log_martians && 917 if (log_martians &&
915 peer->rate_tokens == ip_rt_redirect_number) 918 peer->rate_tokens == ip_rt_redirect_number)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 27e2f6837062..cf3c5095c10e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
1186 flags = msg->msg_flags; 1186 flags = msg->msg_flags;
1187 1187
1188 if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { 1188 if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
1189 if (sk->sk_state != TCP_ESTABLISHED) { 1189 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
1190 err = -EINVAL; 1190 err = -EINVAL;
1191 goto out_err; 1191 goto out_err;
1192 } 1192 }
@@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk)
2528 sk_mem_reclaim(sk); 2528 sk_mem_reclaim(sk);
2529 tcp_clear_all_retrans_hints(tcp_sk(sk)); 2529 tcp_clear_all_retrans_hints(tcp_sk(sk));
2530 tcp_sk(sk)->packets_out = 0; 2530 tcp_sk(sk)->packets_out = 0;
2531 inet_csk(sk)->icsk_backoff = 0;
2531} 2532}
2532 2533
2533int tcp_disconnect(struct sock *sk, int flags) 2534int tcp_disconnect(struct sock *sk, int flags)
@@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags)
2576 tp->write_seq += tp->max_window + 2; 2577 tp->write_seq += tp->max_window + 2;
2577 if (tp->write_seq == 0) 2578 if (tp->write_seq == 0)
2578 tp->write_seq = 1; 2579 tp->write_seq = 1;
2579 icsk->icsk_backoff = 0;
2580 tp->snd_cwnd = 2; 2580 tp->snd_cwnd = 2;
2581 icsk->icsk_probes_out = 0; 2581 icsk->icsk_probes_out = 0;
2582 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2582 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index efc6fef692ff..ec3cea9d6828 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
536 if (sock_owned_by_user(sk)) 536 if (sock_owned_by_user(sk))
537 break; 537 break;
538 538
539 skb = tcp_rtx_queue_head(sk);
540 if (WARN_ON_ONCE(!skb))
541 break;
542
539 icsk->icsk_backoff--; 543 icsk->icsk_backoff--;
540 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : 544 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
541 TCP_TIMEOUT_INIT; 545 TCP_TIMEOUT_INIT;
542 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); 546 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
543 547
544 skb = tcp_rtx_queue_head(sk);
545 548
546 tcp_mstamp_refresh(tp); 549 tcp_mstamp_refresh(tp);
547 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); 550 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 730bc44dbad9..ccc78f3a4b60 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2347 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ 2347 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2348 skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; 2348 skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2349 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); 2349 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2350 tcp_init_tso_segs(skb, mss_now);
2350 goto repair; /* Skip network transmission */ 2351 goto repair; /* Skip network transmission */
2351 } 2352 }
2352 2353
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index f87dbc78b6bc..71a29e9c0620 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk)
226 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 226 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
227 if (icsk->icsk_retransmits) { 227 if (icsk->icsk_retransmits) {
228 dst_negative_advice(sk); 228 dst_negative_advice(sk);
229 } else if (!tp->syn_data && !tp->syn_fastopen) { 229 } else {
230 sk_rethink_txhash(sk); 230 sk_rethink_txhash(sk);
231 } 231 }
232 retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; 232 retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3fb0ed5e4789..372fdc5381a9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
562 562
563 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 563 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
564 int (*handler)(struct sk_buff *skb, u32 info); 564 int (*handler)(struct sk_buff *skb, u32 info);
565 const struct ip_tunnel_encap_ops *encap;
565 566
566 if (!iptun_encaps[i]) 567 encap = rcu_dereference(iptun_encaps[i]);
568 if (!encap)
567 continue; 569 continue;
568 handler = rcu_dereference(iptun_encaps[i]->err_handler); 570 handler = encap->err_handler;
569 if (handler && !handler(skb, info)) 571 if (handler && !handler(skb, info))
570 return 0; 572 return 0;
571 } 573 }
@@ -847,15 +849,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
847 const int hlen = skb_network_header_len(skb) + 849 const int hlen = skb_network_header_len(skb) +
848 sizeof(struct udphdr); 850 sizeof(struct udphdr);
849 851
850 if (hlen + cork->gso_size > cork->fragsize) 852 if (hlen + cork->gso_size > cork->fragsize) {
853 kfree_skb(skb);
851 return -EINVAL; 854 return -EINVAL;
852 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) 855 }
856 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
857 kfree_skb(skb);
853 return -EINVAL; 858 return -EINVAL;
854 if (sk->sk_no_check_tx) 859 }
860 if (sk->sk_no_check_tx) {
861 kfree_skb(skb);
855 return -EINVAL; 862 return -EINVAL;
863 }
856 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 864 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
857 dst_xfrm(skb_dst(skb))) 865 dst_xfrm(skb_dst(skb))) {
866 kfree_skb(skb);
858 return -EIO; 867 return -EIO;
868 }
859 869
860 skb_shinfo(skb)->gso_size = cork->gso_size; 870 skb_shinfo(skb)->gso_size = cork->gso_size;
861 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 871 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
@@ -1918,7 +1928,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash)
1918} 1928}
1919EXPORT_SYMBOL(udp_lib_rehash); 1929EXPORT_SYMBOL(udp_lib_rehash);
1920 1930
1921static void udp_v4_rehash(struct sock *sk) 1931void udp_v4_rehash(struct sock *sk)
1922{ 1932{
1923 u16 new_hash = ipv4_portaddr_hash(sock_net(sk), 1933 u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
1924 inet_sk(sk)->inet_rcv_saddr, 1934 inet_sk(sk)->inet_rcv_saddr,
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 322672655419..6b2fa77eeb1c 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -10,6 +10,7 @@ int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
10int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); 10int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
11 11
12int udp_v4_get_port(struct sock *sk, unsigned short snum); 12int udp_v4_get_port(struct sock *sk, unsigned short snum);
13void udp_v4_rehash(struct sock *sk);
13 14
14int udp_setsockopt(struct sock *sk, int level, int optname, 15int udp_setsockopt(struct sock *sk, int level, int optname,
15 char __user *optval, unsigned int optlen); 16 char __user *optval, unsigned int optlen);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 39c7f17d916f..3c94b8f0ff27 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -53,6 +53,7 @@ struct proto udplite_prot = {
53 .sendpage = udp_sendpage, 53 .sendpage = udp_sendpage,
54 .hash = udp_lib_hash, 54 .hash = udp_lib_hash,
55 .unhash = udp_lib_unhash, 55 .unhash = udp_lib_unhash,
56 .rehash = udp_v4_rehash,
56 .get_port = udp_v4_get_port, 57 .get_port = udp_v4_get_port,
57 .memory_allocated = &udp_memory_allocated, 58 .memory_allocated = &udp_memory_allocated,
58 .sysctl_mem = sysctl_udp_mem, 59 .sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8eeec6eb2bd3..72ffd3d760ff 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1165 list_for_each_entry(ifa, &idev->addr_list, if_list) { 1165 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1166 if (ifa == ifp) 1166 if (ifa == ifp)
1167 continue; 1167 continue;
1168 if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr, 1168 if (ifa->prefix_len != ifp->prefix_len ||
1169 !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1169 ifp->prefix_len)) 1170 ifp->prefix_len))
1170 continue; 1171 continue;
1171 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) 1172 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
@@ -3495,8 +3496,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3495 3496
3496 if (!addrconf_link_ready(dev)) { 3497 if (!addrconf_link_ready(dev)) {
3497 /* device is not ready yet. */ 3498 /* device is not ready yet. */
3498 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", 3499 pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3499 dev->name); 3500 dev->name);
3500 break; 3501 break;
3501 } 3502 }
3502 3503
@@ -5120,6 +5121,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5120 if (idev) { 5121 if (idev) {
5121 err = in6_dump_addrs(idev, skb, cb, s_ip_idx, 5122 err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
5122 &fillargs); 5123 &fillargs);
5124 if (err > 0)
5125 err = 0;
5123 } 5126 }
5124 goto put_tgt_net; 5127 goto put_tgt_net;
5125 } 5128 }
@@ -5154,7 +5157,7 @@ put_tgt_net:
5154 if (fillargs.netnsid >= 0) 5157 if (fillargs.netnsid >= 0)
5155 put_net(tgt_net); 5158 put_net(tgt_net);
5156 5159
5157 return err < 0 ? err : skb->len; 5160 return skb->len ? : err;
5158} 5161}
5159 5162
5160static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 5163static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0bfb6cc0a30a..d99753b5e39b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -310,6 +310,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
310 310
311 /* Check if the address belongs to the host. */ 311 /* Check if the address belongs to the host. */
312 if (addr_type == IPV6_ADDR_MAPPED) { 312 if (addr_type == IPV6_ADDR_MAPPED) {
313 struct net_device *dev = NULL;
313 int chk_addr_ret; 314 int chk_addr_ret;
314 315
315 /* Binding to v4-mapped address on a v6-only socket 316 /* Binding to v4-mapped address on a v6-only socket
@@ -320,9 +321,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
320 goto out; 321 goto out;
321 } 322 }
322 323
324 rcu_read_lock();
325 if (sk->sk_bound_dev_if) {
326 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
327 if (!dev) {
328 err = -ENODEV;
329 goto out_unlock;
330 }
331 }
332
323 /* Reproduce AF_INET checks to make the bindings consistent */ 333 /* Reproduce AF_INET checks to make the bindings consistent */
324 v4addr = addr->sin6_addr.s6_addr32[3]; 334 v4addr = addr->sin6_addr.s6_addr32[3];
325 chk_addr_ret = inet_addr_type(net, v4addr); 335 chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
336 rcu_read_unlock();
337
326 if (!inet_can_nonlocal_bind(net, inet) && 338 if (!inet_can_nonlocal_bind(net, inet) &&
327 v4addr != htonl(INADDR_ANY) && 339 v4addr != htonl(INADDR_ANY) &&
328 chk_addr_ret != RTN_LOCAL && 340 chk_addr_ret != RTN_LOCAL &&
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index bde08aa549f3..ee4a4e54d016 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
341 skb_reset_network_header(skb); 341 skb_reset_network_header(skb);
342 iph = ipv6_hdr(skb); 342 iph = ipv6_hdr(skb);
343 iph->daddr = fl6->daddr; 343 iph->daddr = fl6->daddr;
344 ip6_flow_hdr(iph, 0, 0);
344 345
345 serr = SKB_EXT_ERR(skb); 346 serr = SKB_EXT_ERR(skb);
346 serr->ee.ee_errno = err; 347 serr->ee.ee_errno = err;
@@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
700 } 701 }
701 if (np->rxopt.bits.rxorigdstaddr) { 702 if (np->rxopt.bits.rxorigdstaddr) {
702 struct sockaddr_in6 sin6; 703 struct sockaddr_in6 sin6;
703 __be16 *ports; 704 __be16 _ports[2], *ports;
704 int end;
705 705
706 end = skb_transport_offset(skb) + 4; 706 ports = skb_header_pointer(skb, skb_transport_offset(skb),
707 if (end <= 0 || pskb_may_pull(skb, end)) { 707 sizeof(_ports), &_ports);
708 if (ports) {
708 /* All current transport protocols have the port numbers in the 709 /* All current transport protocols have the port numbers in the
709 * first four bytes of the transport header and this function is 710 * first four bytes of the transport header and this function is
710 * written with this assumption in mind. 711 * written with this assumption in mind.
711 */ 712 */
712 ports = (__be16 *)skb_transport_header(skb);
713
714 sin6.sin6_family = AF_INET6; 713 sin6.sin6_family = AF_INET6;
715 sin6.sin6_addr = ipv6_hdr(skb)->daddr; 714 sin6.sin6_addr = ipv6_hdr(skb)->daddr;
716 sin6.sin6_port = ports[1]; 715 sin6.sin6_port = ports[1];
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5afe9f83374d..239d4a65ad6e 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
296 skb->len += tailen; 296 skb->len += tailen;
297 skb->data_len += tailen; 297 skb->data_len += tailen;
298 skb->truesize += tailen; 298 skb->truesize += tailen;
299 if (sk) 299 if (sk && sk_fullsock(sk))
300 refcount_add(tailen, &sk->sk_wmem_alloc); 300 refcount_add(tailen, &sk->sk_wmem_alloc);
301 301
302 goto out; 302 goto out;
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index bd675c61deb1..867474abe269 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
72 72
73static int gue6_err_proto_handler(int proto, struct sk_buff *skb, 73static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
74 struct inet6_skb_parm *opt, 74 struct inet6_skb_parm *opt,
75 u8 type, u8 code, int offset, u32 info) 75 u8 type, u8 code, int offset, __be32 info)
76{ 76{
77 const struct inet6_protocol *ipprot; 77 const struct inet6_protocol *ipprot;
78 78
@@ -90,10 +90,11 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
90{ 90{
91 int transport_offset = skb_transport_offset(skb); 91 int transport_offset = skb_transport_offset(skb);
92 struct guehdr *guehdr; 92 struct guehdr *guehdr;
93 size_t optlen; 93 size_t len, optlen;
94 int ret; 94 int ret;
95 95
96 if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) 96 len = sizeof(struct udphdr) + sizeof(struct guehdr);
97 if (!pskb_may_pull(skb, len))
97 return -EINVAL; 98 return -EINVAL;
98 99
99 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; 100 guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -128,9 +129,21 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
128 129
129 optlen = guehdr->hlen << 2; 130 optlen = guehdr->hlen << 2;
130 131
132 if (!pskb_may_pull(skb, len + optlen))
133 return -EINVAL;
134
135 guehdr = (struct guehdr *)&udp_hdr(skb)[1];
131 if (validate_gue_flags(guehdr, optlen)) 136 if (validate_gue_flags(guehdr, optlen))
132 return -EINVAL; 137 return -EINVAL;
133 138
139 /* Handling exceptions for direct UDP encapsulation in GUE would lead to
140 * recursion. Besides, this kind of encapsulation can't even be
141 * configured currently. Discard this.
142 */
143 if (guehdr->proto_ctype == IPPROTO_UDP ||
144 guehdr->proto_ctype == IPPROTO_UDPLITE)
145 return -EOPNOTSUPP;
146
134 skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr)); 147 skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
135 ret = gue6_err_proto_handler(guehdr->proto_ctype, skb, 148 ret = gue6_err_proto_handler(guehdr->proto_ctype, skb,
136 opt, type, code, offset, info); 149 opt, type, code, offset, info);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 5d7aa2c2770c..bbcdfd299692 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -423,10 +423,10 @@ static int icmp6_iif(const struct sk_buff *skb)
423static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, 423static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
424 const struct in6_addr *force_saddr) 424 const struct in6_addr *force_saddr)
425{ 425{
426 struct net *net = dev_net(skb->dev);
427 struct inet6_dev *idev = NULL; 426 struct inet6_dev *idev = NULL;
428 struct ipv6hdr *hdr = ipv6_hdr(skb); 427 struct ipv6hdr *hdr = ipv6_hdr(skb);
429 struct sock *sk; 428 struct sock *sk;
429 struct net *net;
430 struct ipv6_pinfo *np; 430 struct ipv6_pinfo *np;
431 const struct in6_addr *saddr = NULL; 431 const struct in6_addr *saddr = NULL;
432 struct dst_entry *dst; 432 struct dst_entry *dst;
@@ -437,12 +437,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
437 int iif = 0; 437 int iif = 0;
438 int addr_type = 0; 438 int addr_type = 0;
439 int len; 439 int len;
440 u32 mark = IP6_REPLY_MARK(net, skb->mark); 440 u32 mark;
441 441
442 if ((u8 *)hdr < skb->head || 442 if ((u8 *)hdr < skb->head ||
443 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) 443 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
444 return; 444 return;
445 445
446 if (!skb->dev)
447 return;
448 net = dev_net(skb->dev);
449 mark = IP6_REPLY_MARK(net, skb->mark);
446 /* 450 /*
447 * Make sure we respect the rules 451 * Make sure we respect the rules
448 * i.e. RFC 1885 2.4(e) 452 * i.e. RFC 1885 2.4(e)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 09d0826742f8..26f25b6e2833 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -534,13 +534,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
534 struct ip6_tnl *tunnel; 534 struct ip6_tnl *tunnel;
535 u8 ver; 535 u8 ver;
536 536
537 if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
538 return PACKET_REJECT;
539
540 ipv6h = ipv6_hdr(skb); 537 ipv6h = ipv6_hdr(skb);
541 ershdr = (struct erspan_base_hdr *)skb->data; 538 ershdr = (struct erspan_base_hdr *)skb->data;
542 ver = ershdr->ver; 539 ver = ershdr->ver;
543 tpi->key = cpu_to_be32(get_session_id(ershdr));
544 540
545 tunnel = ip6gre_tunnel_lookup(skb->dev, 541 tunnel = ip6gre_tunnel_lookup(skb->dev,
546 &ipv6h->saddr, &ipv6h->daddr, tpi->key, 542 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
@@ -922,6 +918,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
922 __u8 dsfield = false; 918 __u8 dsfield = false;
923 struct flowi6 fl6; 919 struct flowi6 fl6;
924 int err = -EINVAL; 920 int err = -EINVAL;
921 __be16 proto;
925 __u32 mtu; 922 __u32 mtu;
926 int nhoff; 923 int nhoff;
927 int thoff; 924 int thoff;
@@ -1035,8 +1032,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
1035 } 1032 }
1036 1033
1037 /* Push GRE header. */ 1034 /* Push GRE header. */
1038 gre_build_header(skb, 8, TUNNEL_SEQ, 1035 proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
1039 htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++)); 1036 : htons(ETH_P_ERSPAN2);
1037 gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
1040 1038
1041 /* TooBig packet may have updated dst->dev's mtu */ 1039 /* TooBig packet may have updated dst->dev's mtu */
1042 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) 1040 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
@@ -1169,6 +1167,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
1169 t->parms.i_flags = p->i_flags; 1167 t->parms.i_flags = p->i_flags;
1170 t->parms.o_flags = p->o_flags; 1168 t->parms.o_flags = p->o_flags;
1171 t->parms.fwmark = p->fwmark; 1169 t->parms.fwmark = p->fwmark;
1170 t->parms.erspan_ver = p->erspan_ver;
1171 t->parms.index = p->index;
1172 t->parms.dir = p->dir;
1173 t->parms.hwid = p->hwid;
1172 dst_cache_reset(&t->dst_cache); 1174 dst_cache_reset(&t->dst_cache);
1173} 1175}
1174 1176
@@ -1717,6 +1719,27 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1717 return 0; 1719 return 0;
1718} 1720}
1719 1721
1722static void ip6erspan_set_version(struct nlattr *data[],
1723 struct __ip6_tnl_parm *parms)
1724{
1725 if (!data)
1726 return;
1727
1728 parms->erspan_ver = 1;
1729 if (data[IFLA_GRE_ERSPAN_VER])
1730 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1731
1732 if (parms->erspan_ver == 1) {
1733 if (data[IFLA_GRE_ERSPAN_INDEX])
1734 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1735 } else if (parms->erspan_ver == 2) {
1736 if (data[IFLA_GRE_ERSPAN_DIR])
1737 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1738 if (data[IFLA_GRE_ERSPAN_HWID])
1739 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1740 }
1741}
1742
1720static void ip6gre_netlink_parms(struct nlattr *data[], 1743static void ip6gre_netlink_parms(struct nlattr *data[],
1721 struct __ip6_tnl_parm *parms) 1744 struct __ip6_tnl_parm *parms)
1722{ 1745{
@@ -1765,20 +1788,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
1765 1788
1766 if (data[IFLA_GRE_COLLECT_METADATA]) 1789 if (data[IFLA_GRE_COLLECT_METADATA])
1767 parms->collect_md = true; 1790 parms->collect_md = true;
1768
1769 parms->erspan_ver = 1;
1770 if (data[IFLA_GRE_ERSPAN_VER])
1771 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1772
1773 if (parms->erspan_ver == 1) {
1774 if (data[IFLA_GRE_ERSPAN_INDEX])
1775 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1776 } else if (parms->erspan_ver == 2) {
1777 if (data[IFLA_GRE_ERSPAN_DIR])
1778 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1779 if (data[IFLA_GRE_ERSPAN_HWID])
1780 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1781 }
1782} 1791}
1783 1792
1784static int ip6gre_tap_init(struct net_device *dev) 1793static int ip6gre_tap_init(struct net_device *dev)
@@ -2025,9 +2034,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
2025 struct nlattr *data[], 2034 struct nlattr *data[],
2026 struct netlink_ext_ack *extack) 2035 struct netlink_ext_ack *extack)
2027{ 2036{
2028 struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); 2037 struct ip6_tnl *t = netdev_priv(dev);
2038 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
2029 struct __ip6_tnl_parm p; 2039 struct __ip6_tnl_parm p;
2030 struct ip6_tnl *t;
2031 2040
2032 t = ip6gre_changelink_common(dev, tb, data, &p, extack); 2041 t = ip6gre_changelink_common(dev, tb, data, &p, extack);
2033 if (IS_ERR(t)) 2042 if (IS_ERR(t))
@@ -2096,12 +2105,31 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2096{ 2105{
2097 struct ip6_tnl *t = netdev_priv(dev); 2106 struct ip6_tnl *t = netdev_priv(dev);
2098 struct __ip6_tnl_parm *p = &t->parms; 2107 struct __ip6_tnl_parm *p = &t->parms;
2108 __be16 o_flags = p->o_flags;
2109
2110 if (p->erspan_ver == 1 || p->erspan_ver == 2) {
2111 if (!p->collect_md)
2112 o_flags |= TUNNEL_KEY;
2113
2114 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
2115 goto nla_put_failure;
2116
2117 if (p->erspan_ver == 1) {
2118 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2119 goto nla_put_failure;
2120 } else {
2121 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
2122 goto nla_put_failure;
2123 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
2124 goto nla_put_failure;
2125 }
2126 }
2099 2127
2100 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 2128 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2101 nla_put_be16(skb, IFLA_GRE_IFLAGS, 2129 nla_put_be16(skb, IFLA_GRE_IFLAGS,
2102 gre_tnl_flags_to_gre_flags(p->i_flags)) || 2130 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
2103 nla_put_be16(skb, IFLA_GRE_OFLAGS, 2131 nla_put_be16(skb, IFLA_GRE_OFLAGS,
2104 gre_tnl_flags_to_gre_flags(p->o_flags)) || 2132 gre_tnl_flags_to_gre_flags(o_flags)) ||
2105 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 2133 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
2106 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 2134 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
2107 nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || 2135 nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
@@ -2110,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2110 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || 2138 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
2111 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || 2139 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
2112 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) || 2140 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
2113 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) || 2141 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
2114 nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2115 goto nla_put_failure; 2142 goto nla_put_failure;
2116 2143
2117 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, 2144 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -2129,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2129 goto nla_put_failure; 2156 goto nla_put_failure;
2130 } 2157 }
2131 2158
2132 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
2133 goto nla_put_failure;
2134
2135 if (p->erspan_ver == 1) {
2136 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2137 goto nla_put_failure;
2138 } else if (p->erspan_ver == 2) {
2139 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
2140 goto nla_put_failure;
2141 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
2142 goto nla_put_failure;
2143 }
2144
2145 return 0; 2159 return 0;
2146 2160
2147nla_put_failure: 2161nla_put_failure:
@@ -2196,6 +2210,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
2196 int err; 2210 int err;
2197 2211
2198 ip6gre_netlink_parms(data, &nt->parms); 2212 ip6gre_netlink_parms(data, &nt->parms);
2213 ip6erspan_set_version(data, &nt->parms);
2199 ign = net_generic(net, ip6gre_net_id); 2214 ign = net_generic(net, ip6gre_net_id);
2200 2215
2201 if (nt->parms.collect_md) { 2216 if (nt->parms.collect_md) {
@@ -2241,6 +2256,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
2241 if (IS_ERR(t)) 2256 if (IS_ERR(t))
2242 return PTR_ERR(t); 2257 return PTR_ERR(t);
2243 2258
2259 ip6erspan_set_version(data, &p);
2244 ip6gre_tunnel_unlink_md(ign, t); 2260 ip6gre_tunnel_unlink_md(ign, t);
2245 ip6gre_tunnel_unlink(ign, t); 2261 ip6gre_tunnel_unlink(ign, t);
2246 ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); 2262 ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 30337b38274b..cc01aa3f2b5e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1516,6 +1516,9 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
1516 continue; 1516 continue;
1517 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); 1517 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
1518 list_del_rcu(&c->list); 1518 list_del_rcu(&c->list);
1519 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1520 FIB_EVENT_ENTRY_DEL,
1521 (struct mfc6_cache *)c, mrt->id);
1519 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); 1522 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
1520 mr_cache_put(c); 1523 mr_cache_put(c);
1521 } 1524 }
@@ -1524,10 +1527,6 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
1524 spin_lock_bh(&mfc_unres_lock); 1527 spin_lock_bh(&mfc_unres_lock);
1525 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { 1528 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1526 list_del(&c->list); 1529 list_del(&c->list);
1527 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1528 FIB_EVENT_ENTRY_DEL,
1529 (struct mfc6_cache *)c,
1530 mrt->id);
1531 mr6_netlink_event(mrt, (struct mfc6_cache *)c, 1530 mr6_netlink_event(mrt, (struct mfc6_cache *)c,
1532 RTM_DELROUTE); 1531 RTM_DELROUTE);
1533 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); 1532 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 8b075f0bc351..6d0b1f3e927b 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
23 struct sock *sk = sk_to_full_sk(skb->sk); 23 struct sock *sk = sk_to_full_sk(skb->sk);
24 unsigned int hh_len; 24 unsigned int hh_len;
25 struct dst_entry *dst; 25 struct dst_entry *dst;
26 int strict = (ipv6_addr_type(&iph->daddr) &
27 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
26 struct flowi6 fl6 = { 28 struct flowi6 fl6 = {
27 .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : 29 .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
28 rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0, 30 strict ? skb_dst(skb)->dev->ifindex : 0,
29 .flowi6_mark = skb->mark, 31 .flowi6_mark = skb->mark,
30 .flowi6_uid = sock_net_uid(net, sk), 32 .flowi6_uid = sock_net_uid(net, sk),
31 .daddr = iph->daddr, 33 .daddr = iph->daddr,
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 23022447eb49..7a41ee3c11b4 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -226,6 +226,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
226 } 226 }
227 227
228 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 228 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
229 target.dst.protonum = IPPROTO_ICMPV6;
229 if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) 230 if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
230 return 0; 231 return 0;
231 232
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 40b225f87d5e..ce15dc4ccbfa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
1274static void rt6_remove_exception(struct rt6_exception_bucket *bucket, 1274static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1275 struct rt6_exception *rt6_ex) 1275 struct rt6_exception *rt6_ex)
1276{ 1276{
1277 struct fib6_info *from;
1277 struct net *net; 1278 struct net *net;
1278 1279
1279 if (!bucket || !rt6_ex) 1280 if (!bucket || !rt6_ex)
1280 return; 1281 return;
1281 1282
1282 net = dev_net(rt6_ex->rt6i->dst.dev); 1283 net = dev_net(rt6_ex->rt6i->dst.dev);
1284 net->ipv6.rt6_stats->fib_rt_cache--;
1285
1286 /* purge completely the exception to allow releasing the held resources:
1287 * some [sk] cache may keep the dst around for unlimited time
1288 */
1289 from = rcu_dereference_protected(rt6_ex->rt6i->from,
1290 lockdep_is_held(&rt6_exception_lock));
1291 rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
1292 fib6_info_release(from);
1293 dst_dev_put(&rt6_ex->rt6i->dst);
1294
1283 hlist_del_rcu(&rt6_ex->hlist); 1295 hlist_del_rcu(&rt6_ex->hlist);
1284 dst_release(&rt6_ex->rt6i->dst); 1296 dst_release(&rt6_ex->rt6i->dst);
1285 kfree_rcu(rt6_ex, rcu); 1297 kfree_rcu(rt6_ex, rcu);
1286 WARN_ON_ONCE(!bucket->depth); 1298 WARN_ON_ONCE(!bucket->depth);
1287 bucket->depth--; 1299 bucket->depth--;
1288 net->ipv6.rt6_stats->fib_rt_cache--;
1289} 1300}
1290 1301
1291/* Remove oldest rt6_ex in bucket and free the memory 1302/* Remove oldest rt6_ex in bucket and free the memory
@@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt)
1599static void rt6_update_exception_stamp_rt(struct rt6_info *rt) 1610static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1600{ 1611{
1601 struct rt6_exception_bucket *bucket; 1612 struct rt6_exception_bucket *bucket;
1602 struct fib6_info *from = rt->from;
1603 struct in6_addr *src_key = NULL; 1613 struct in6_addr *src_key = NULL;
1604 struct rt6_exception *rt6_ex; 1614 struct rt6_exception *rt6_ex;
1605 1615 struct fib6_info *from;
1606 if (!from ||
1607 !(rt->rt6i_flags & RTF_CACHE))
1608 return;
1609 1616
1610 rcu_read_lock(); 1617 rcu_read_lock();
1618 from = rcu_dereference(rt->from);
1619 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1620 goto unlock;
1621
1611 bucket = rcu_dereference(from->rt6i_exception_bucket); 1622 bucket = rcu_dereference(from->rt6i_exception_bucket);
1612 1623
1613#ifdef CONFIG_IPV6_SUBTREES 1624#ifdef CONFIG_IPV6_SUBTREES
@@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1626 if (rt6_ex) 1637 if (rt6_ex)
1627 rt6_ex->stamp = jiffies; 1638 rt6_ex->stamp = jiffies;
1628 1639
1640unlock:
1629 rcu_read_unlock(); 1641 rcu_read_unlock();
1630} 1642}
1631 1643
@@ -2742,20 +2754,24 @@ static int ip6_route_check_nh_onlink(struct net *net,
2742 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; 2754 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
2743 const struct in6_addr *gw_addr = &cfg->fc_gateway; 2755 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2744 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT; 2756 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
2757 struct fib6_info *from;
2745 struct rt6_info *grt; 2758 struct rt6_info *grt;
2746 int err; 2759 int err;
2747 2760
2748 err = 0; 2761 err = 0;
2749 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0); 2762 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
2750 if (grt) { 2763 if (grt) {
2764 rcu_read_lock();
2765 from = rcu_dereference(grt->from);
2751 if (!grt->dst.error && 2766 if (!grt->dst.error &&
2752 /* ignore match if it is the default route */ 2767 /* ignore match if it is the default route */
2753 grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) && 2768 from && !ipv6_addr_any(&from->fib6_dst.addr) &&
2754 (grt->rt6i_flags & flags || dev != grt->dst.dev)) { 2769 (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
2755 NL_SET_ERR_MSG(extack, 2770 NL_SET_ERR_MSG(extack,
2756 "Nexthop has invalid gateway or device mismatch"); 2771 "Nexthop has invalid gateway or device mismatch");
2757 err = -EINVAL; 2772 err = -EINVAL;
2758 } 2773 }
2774 rcu_read_unlock();
2759 2775
2760 ip6_rt_put(grt); 2776 ip6_rt_put(grt);
2761 } 2777 }
@@ -4251,17 +4267,6 @@ struct rt6_nh {
4251 struct list_head next; 4267 struct list_head next;
4252}; 4268};
4253 4269
4254static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
4255{
4256 struct rt6_nh *nh;
4257
4258 list_for_each_entry(nh, rt6_nh_list, next) {
4259 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
4260 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
4261 nh->r_cfg.fc_ifindex);
4262 }
4263}
4264
4265static int ip6_route_info_append(struct net *net, 4270static int ip6_route_info_append(struct net *net,
4266 struct list_head *rt6_nh_list, 4271 struct list_head *rt6_nh_list,
4267 struct fib6_info *rt, 4272 struct fib6_info *rt,
@@ -4407,7 +4412,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
4407 nh->fib6_info = NULL; 4412 nh->fib6_info = NULL;
4408 if (err) { 4413 if (err) {
4409 if (replace && nhn) 4414 if (replace && nhn)
4410 ip6_print_replace_route_err(&rt6_nh_list); 4415 NL_SET_ERR_MSG_MOD(extack,
4416 "multipath route replace failed (check consistency of installed routes)");
4411 err_nh = nh; 4417 err_nh = nh;
4412 goto add_errout; 4418 goto add_errout;
4413 } 4419 }
@@ -4659,7 +4665,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4659 table = rt->fib6_table->tb6_id; 4665 table = rt->fib6_table->tb6_id;
4660 else 4666 else
4661 table = RT6_TABLE_UNSPEC; 4667 table = RT6_TABLE_UNSPEC;
4662 rtm->rtm_table = table; 4668 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
4663 if (nla_put_u32(skb, RTA_TABLE, table)) 4669 if (nla_put_u32(skb, RTA_TABLE, table))
4664 goto nla_put_failure; 4670 goto nla_put_failure;
4665 4671
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 8d0ba757a46c..9b2f272ca164 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
221 rcu_read_unlock(); 221 rcu_read_unlock();
222 222
223 genlmsg_end(msg, hdr); 223 genlmsg_end(msg, hdr);
224 genlmsg_reply(msg, info); 224 return genlmsg_reply(msg, info);
225
226 return 0;
227 225
228nla_put_failure: 226nla_put_failure:
229 rcu_read_unlock(); 227 rcu_read_unlock();
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 8181ee7e1e27..ee5403cbe655 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
146 } else { 146 } else {
147 ip6_flow_hdr(hdr, 0, flowlabel); 147 ip6_flow_hdr(hdr, 0, flowlabel);
148 hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); 148 hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
149
150 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
149 } 151 }
150 152
151 hdr->nexthdr = NEXTHDR_ROUTING; 153 hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1e03305c0549..e8a1dabef803 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
546 } 546 }
547 547
548 err = 0; 548 err = 0;
549 if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) 549 if (__in6_dev_get(skb->dev) &&
550 !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
550 goto out; 551 goto out;
551 552
552 if (t->parms.iph.daddr == 0) 553 if (t->parms.iph.daddr == 0)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9cbf363172bd..b444483cdb2b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -102,7 +102,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
102 return udp_lib_get_port(sk, snum, hash2_nulladdr); 102 return udp_lib_get_port(sk, snum, hash2_nulladdr);
103} 103}
104 104
105static void udp_v6_rehash(struct sock *sk) 105void udp_v6_rehash(struct sock *sk)
106{ 106{
107 u16 new_hash = ipv6_portaddr_hash(sock_net(sk), 107 u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
108 &sk->sk_v6_rcv_saddr, 108 &sk->sk_v6_rcv_saddr,
@@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
288 int peeked, peeking, off; 288 int peeked, peeking, off;
289 int err; 289 int err;
290 int is_udplite = IS_UDPLITE(sk); 290 int is_udplite = IS_UDPLITE(sk);
291 struct udp_mib __percpu *mib;
291 bool checksum_valid = false; 292 bool checksum_valid = false;
292 struct udp_mib *mib;
293 int is_udp4; 293 int is_udp4;
294 294
295 if (flags & MSG_ERRQUEUE) 295 if (flags & MSG_ERRQUEUE)
@@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable);
420 */ 420 */
421static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, 421static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
422 struct inet6_skb_parm *opt, 422 struct inet6_skb_parm *opt,
423 u8 type, u8 code, int offset, u32 info) 423 u8 type, u8 code, int offset, __be32 info)
424{ 424{
425 int i; 425 int i;
426 426
427 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 427 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
428 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 428 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
429 u8 type, u8 code, int offset, u32 info); 429 u8 type, u8 code, int offset, __be32 info);
430 const struct ip6_tnl_encap_ops *encap;
430 431
431 if (!ip6tun_encaps[i]) 432 encap = rcu_dereference(ip6tun_encaps[i]);
433 if (!encap)
432 continue; 434 continue;
433 handler = rcu_dereference(ip6tun_encaps[i]->err_handler); 435 handler = encap->err_handler;
434 if (handler && !handler(skb, opt, type, code, offset, info)) 436 if (handler && !handler(skb, opt, type, code, offset, info))
435 return 0; 437 return 0;
436 } 438 }
@@ -1132,15 +1134,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1132 const int hlen = skb_network_header_len(skb) + 1134 const int hlen = skb_network_header_len(skb) +
1133 sizeof(struct udphdr); 1135 sizeof(struct udphdr);
1134 1136
1135 if (hlen + cork->gso_size > cork->fragsize) 1137 if (hlen + cork->gso_size > cork->fragsize) {
1138 kfree_skb(skb);
1136 return -EINVAL; 1139 return -EINVAL;
1137 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) 1140 }
1141 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
1142 kfree_skb(skb);
1138 return -EINVAL; 1143 return -EINVAL;
1139 if (udp_sk(sk)->no_check6_tx) 1144 }
1145 if (udp_sk(sk)->no_check6_tx) {
1146 kfree_skb(skb);
1140 return -EINVAL; 1147 return -EINVAL;
1148 }
1141 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 1149 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1142 dst_xfrm(skb_dst(skb))) 1150 dst_xfrm(skb_dst(skb))) {
1151 kfree_skb(skb);
1143 return -EIO; 1152 return -EIO;
1153 }
1144 1154
1145 skb_shinfo(skb)->gso_size = cork->gso_size; 1155 skb_shinfo(skb)->gso_size = cork->gso_size;
1146 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 1156 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
@@ -1390,10 +1400,7 @@ do_udp_sendmsg:
1390 ipc6.opt = opt; 1400 ipc6.opt = opt;
1391 1401
1392 fl6.flowi6_proto = sk->sk_protocol; 1402 fl6.flowi6_proto = sk->sk_protocol;
1393 if (!ipv6_addr_any(daddr)) 1403 fl6.daddr = *daddr;
1394 fl6.daddr = *daddr;
1395 else
1396 fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1397 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) 1404 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
1398 fl6.saddr = np->saddr; 1405 fl6.saddr = np->saddr;
1399 fl6.fl6_sport = inet->inet_sport; 1406 fl6.fl6_sport = inet->inet_sport;
@@ -1421,6 +1428,9 @@ do_udp_sendmsg:
1421 } 1428 }
1422 } 1429 }
1423 1430
1431 if (ipv6_addr_any(&fl6.daddr))
1432 fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1433
1424 final_p = fl6_update_dst(&fl6, opt, &final); 1434 final_p = fl6_update_dst(&fl6, opt, &final);
1425 if (final_p) 1435 if (final_p)
1426 connected = false; 1436 connected = false;
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 5730e6503cb4..20e324b6f358 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -13,6 +13,7 @@ int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
13 __be32, struct udp_table *); 13 __be32, struct udp_table *);
14 14
15int udp_v6_get_port(struct sock *sk, unsigned short snum); 15int udp_v6_get_port(struct sock *sk, unsigned short snum);
16void udp_v6_rehash(struct sock *sk);
16 17
17int udpv6_getsockopt(struct sock *sk, int level, int optname, 18int udpv6_getsockopt(struct sock *sk, int level, int optname,
18 char __user *optval, int __user *optlen); 19 char __user *optval, int __user *optlen);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index a125aebc29e5..f35907836444 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -49,6 +49,7 @@ struct proto udplitev6_prot = {
49 .recvmsg = udpv6_recvmsg, 49 .recvmsg = udpv6_recvmsg,
50 .hash = udp_lib_hash, 50 .hash = udp_lib_hash,
51 .unhash = udp_lib_unhash, 51 .unhash = udp_lib_unhash,
52 .rehash = udp_v6_rehash,
52 .get_port = udp_v6_get_port, 53 .get_port = udp_v6_get_port,
53 .memory_allocated = &udp_memory_allocated, 54 .memory_allocated = &udp_memory_allocated,
54 .sysctl_mem = sysctl_udp_mem, 55 .sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f5b4febeaa25..bc65db782bfb 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
344 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); 344 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
345 unsigned int i; 345 unsigned int i;
346 346
347 xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
348 xfrm_flush_gc(); 347 xfrm_flush_gc();
348 xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
349 349
350 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) 350 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
351 WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); 351 WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 655c787f9d54..5651c29cb5bd 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
196 return 0; 196 return 0;
197} 197}
198 198
199static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, 199static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
200 gfp_t allocation, struct sock *sk) 200 struct sock *sk)
201{ 201{
202 int err = -ENOBUFS; 202 int err = -ENOBUFS;
203 203
204 sock_hold(sk); 204 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
205 if (*skb2 == NULL) { 205 return err;
206 if (refcount_read(&skb->users) != 1) { 206
207 *skb2 = skb_clone(skb, allocation); 207 skb = skb_clone(skb, allocation);
208 } else { 208
209 *skb2 = skb; 209 if (skb) {
210 refcount_inc(&skb->users); 210 skb_set_owner_r(skb, sk);
211 } 211 skb_queue_tail(&sk->sk_receive_queue, skb);
212 } 212 sk->sk_data_ready(sk);
213 if (*skb2 != NULL) { 213 err = 0;
214 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
215 skb_set_owner_r(*skb2, sk);
216 skb_queue_tail(&sk->sk_receive_queue, *skb2);
217 sk->sk_data_ready(sk);
218 *skb2 = NULL;
219 err = 0;
220 }
221 } 214 }
222 sock_put(sk);
223 return err; 215 return err;
224} 216}
225 217
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
234{ 226{
235 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 227 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
236 struct sock *sk; 228 struct sock *sk;
237 struct sk_buff *skb2 = NULL;
238 int err = -ESRCH; 229 int err = -ESRCH;
239 230
240 /* XXX Do we need something like netlink_overrun? I think 231 /* XXX Do we need something like netlink_overrun? I think
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
253 * socket. 244 * socket.
254 */ 245 */
255 if (pfk->promisc) 246 if (pfk->promisc)
256 pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); 247 pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
257 248
258 /* the exact target will be processed later */ 249 /* the exact target will be processed later */
259 if (sk == one_sk) 250 if (sk == one_sk)
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
268 continue; 259 continue;
269 } 260 }
270 261
271 err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); 262 err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
272 263
273 /* Error is cleared after successful sending to at least one 264 /* Error is cleared after successful sending to at least one
274 * registered KM */ 265 * registered KM */
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
278 rcu_read_unlock(); 269 rcu_read_unlock();
279 270
280 if (one_sk != NULL) 271 if (one_sk != NULL)
281 err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); 272 err = pfkey_broadcast_one(skb, allocation, one_sk);
282 273
283 kfree_skb(skb2);
284 kfree_skb(skb); 274 kfree_skb(skb);
285 return err; 275 return err;
286} 276}
@@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
1783 if (proto == 0) 1773 if (proto == 0)
1784 return -EINVAL; 1774 return -EINVAL;
1785 1775
1786 err = xfrm_state_flush(net, proto, true); 1776 err = xfrm_state_flush(net, proto, true, false);
1787 err2 = unicast_flush_resp(sk, hdr); 1777 err2 = unicast_flush_resp(sk, hdr);
1788 if (err || err2) { 1778 if (err || err2) {
1789 if (err == -ESRCH) /* empty table - go quietly */ 1779 if (err == -ESRCH) /* empty table - go quietly */
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 26f1d435696a..fed6becc5daf 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
83#define L2TP_SLFLAG_S 0x40000000 83#define L2TP_SLFLAG_S 0x40000000
84#define L2TP_SL_SEQ_MASK 0x00ffffff 84#define L2TP_SL_SEQ_MASK 0x00ffffff
85 85
86#define L2TP_HDR_SIZE_SEQ 10 86#define L2TP_HDR_SIZE_MAX 14
87#define L2TP_HDR_SIZE_NOSEQ 6
88 87
89/* Default trace flags */ 88/* Default trace flags */
90#define L2TP_DEFAULT_DEBUG_FLAGS 0 89#define L2TP_DEFAULT_DEBUG_FLAGS 0
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
808 __skb_pull(skb, sizeof(struct udphdr)); 807 __skb_pull(skb, sizeof(struct udphdr));
809 808
810 /* Short packet? */ 809 /* Short packet? */
811 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { 810 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
812 l2tp_info(tunnel, L2TP_MSG_DATA, 811 l2tp_info(tunnel, L2TP_MSG_DATA,
813 "%s: recv short packet (len=%d)\n", 812 "%s: recv short packet (len=%d)\n",
814 tunnel->name, skb->len); 813 tunnel->name, skb->len);
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
884 goto error; 883 goto error;
885 } 884 }
886 885
886 if (tunnel->version == L2TP_HDR_VER_3 &&
887 l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
888 goto error;
889
887 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); 890 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
888 l2tp_session_dec_refcount(session); 891 l2tp_session_dec_refcount(session);
889 892
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9c9afe94d389..b2ce90260c35 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
301} 301}
302#endif 302#endif
303 303
304static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
305 unsigned char **ptr, unsigned char **optr)
306{
307 int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
308
309 if (opt_len > 0) {
310 int off = *ptr - *optr;
311
312 if (!pskb_may_pull(skb, off + opt_len))
313 return -1;
314
315 if (skb->data != *optr) {
316 *optr = skb->data;
317 *ptr = skb->data + off;
318 }
319 }
320
321 return 0;
322}
323
304#define l2tp_printk(ptr, type, func, fmt, ...) \ 324#define l2tp_printk(ptr, type, func, fmt, ...) \
305do { \ 325do { \
306 if (((ptr)->debug) & (type)) \ 326 if (((ptr)->debug) & (type)) \
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 35f6f86d4dcc..d4c60523c549 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
165 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); 165 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
166 } 166 }
167 167
168 if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
169 goto discard_sess;
170
168 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); 171 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
169 l2tp_session_dec_refcount(session); 172 l2tp_session_dec_refcount(session);
170 173
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 237f1a4a0b0c..0ae6899edac0 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
178 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); 178 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
179 } 179 }
180 180
181 if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
182 goto discard_sess;
183
181 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); 184 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
182 l2tp_session_dec_refcount(session); 185 l2tp_session_dec_refcount(session);
183 186
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 69e831bc317b..54821fb1a960 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -8,7 +8,7 @@
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2010, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * Copyright(c) 2015-2017 Intel Deutschland GmbH 10 * Copyright(c) 2015-2017 Intel Deutschland GmbH
11 * Copyright (C) 2018 Intel Corporation 11 * Copyright (C) 2018 - 2019 Intel Corporation
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as 14 * it under the terms of the GNU General Public License version 2 as
@@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
366 366
367 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); 367 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
368 368
369 ieee80211_agg_stop_txq(sta, tid);
370
369 spin_unlock_bh(&sta->lock); 371 spin_unlock_bh(&sta->lock);
370 372
371 ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", 373 ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index de65fe3ed9cc..96496b2c1670 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
941 BSS_CHANGED_P2P_PS | 941 BSS_CHANGED_P2P_PS |
942 BSS_CHANGED_TXPOWER; 942 BSS_CHANGED_TXPOWER;
943 int err; 943 int err;
944 int prev_beacon_int;
944 945
945 old = sdata_dereference(sdata->u.ap.beacon, sdata); 946 old = sdata_dereference(sdata->u.ap.beacon, sdata);
946 if (old) 947 if (old)
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
963 964
964 sdata->needed_rx_chains = sdata->local->rx_chains; 965 sdata->needed_rx_chains = sdata->local->rx_chains;
965 966
967 prev_beacon_int = sdata->vif.bss_conf.beacon_int;
966 sdata->vif.bss_conf.beacon_int = params->beacon_interval; 968 sdata->vif.bss_conf.beacon_int = params->beacon_interval;
967 969
968 if (params->he_cap) 970 if (params->he_cap)
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
974 if (!err) 976 if (!err)
975 ieee80211_vif_copy_chanctx_to_vlans(sdata, false); 977 ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
976 mutex_unlock(&local->mtx); 978 mutex_unlock(&local->mtx);
977 if (err) 979 if (err) {
980 sdata->vif.bss_conf.beacon_int = prev_beacon_int;
978 return err; 981 return err;
982 }
979 983
980 /* 984 /*
981 * Apply control port protocol, this allows us to 985 * Apply control port protocol, this allows us to
@@ -1490,6 +1494,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1490 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) 1494 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
1491 sta->sta.tdls = true; 1495 sta->sta.tdls = true;
1492 1496
1497 if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
1498 !sdata->u.mgd.associated)
1499 return -EINVAL;
1500
1493 err = sta_apply_parameters(local, sta, params); 1501 err = sta_apply_parameters(local, sta, params);
1494 if (err) { 1502 if (err) {
1495 sta_info_free(local, sta); 1503 sta_info_free(local, sta);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 87a729926734..977dea436ee8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
615 * We need a bit of data queued to build aggregates properly, so 615 * We need a bit of data queued to build aggregates properly, so
616 * instruct the TCP stack to allow more than a single ms of data 616 * instruct the TCP stack to allow more than a single ms of data
617 * to be queued in the stack. The value is a bit-shift of 1 617 * to be queued in the stack. The value is a bit-shift of 1
618 * second, so 8 is ~4ms of queued data. Only affects local TCP 618 * second, so 7 is ~8ms of queued data. Only affects local TCP
619 * sockets. 619 * sockets.
620 * This is the default, anyhow - drivers may need to override it 620 * This is the default, anyhow - drivers may need to override it
621 * for local reasons (longer buffers, longer completion time, or 621 * for local reasons (longer buffers, longer completion time, or
622 * similar). 622 * similar).
623 */ 623 */
624 local->hw.tx_sk_pacing_shift = 8; 624 local->hw.tx_sk_pacing_shift = 7;
625 625
626 /* set up some defaults */ 626 /* set up some defaults */
627 local->hw.queues = 1; 627 local->hw.queues = 1;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index cad6592c52a1..2ec7011a4d07 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
70 * @dst: mesh path destination mac address 70 * @dst: mesh path destination mac address
71 * @mpp: mesh proxy mac address 71 * @mpp: mesh proxy mac address
72 * @rhash: rhashtable list pointer 72 * @rhash: rhashtable list pointer
73 * @walk_list: linked list containing all mesh_path objects.
73 * @gate_list: list pointer for known gates list 74 * @gate_list: list pointer for known gates list
74 * @sdata: mesh subif 75 * @sdata: mesh subif
75 * @next_hop: mesh neighbor to which frames for this destination will be 76 * @next_hop: mesh neighbor to which frames for this destination will be
@@ -105,6 +106,7 @@ struct mesh_path {
105 u8 dst[ETH_ALEN]; 106 u8 dst[ETH_ALEN];
106 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ 107 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
107 struct rhash_head rhash; 108 struct rhash_head rhash;
109 struct hlist_node walk_list;
108 struct hlist_node gate_list; 110 struct hlist_node gate_list;
109 struct ieee80211_sub_if_data *sdata; 111 struct ieee80211_sub_if_data *sdata;
110 struct sta_info __rcu *next_hop; 112 struct sta_info __rcu *next_hop;
@@ -133,12 +135,16 @@ struct mesh_path {
133 * gate's mpath may or may not be resolved and active. 135 * gate's mpath may or may not be resolved and active.
134 * @gates_lock: protects updates to known_gates 136 * @gates_lock: protects updates to known_gates
135 * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr 137 * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
138 * @walk_head: linked list containging all mesh_path objects
139 * @walk_lock: lock protecting walk_head
136 * @entries: number of entries in the table 140 * @entries: number of entries in the table
137 */ 141 */
138struct mesh_table { 142struct mesh_table {
139 struct hlist_head known_gates; 143 struct hlist_head known_gates;
140 spinlock_t gates_lock; 144 spinlock_t gates_lock;
141 struct rhashtable rhead; 145 struct rhashtable rhead;
146 struct hlist_head walk_head;
147 spinlock_t walk_lock;
142 atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ 148 atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
143}; 149};
144 150
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a5125624a76d..88a6d5e18ccc 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
59 return NULL; 59 return NULL;
60 60
61 INIT_HLIST_HEAD(&newtbl->known_gates); 61 INIT_HLIST_HEAD(&newtbl->known_gates);
62 INIT_HLIST_HEAD(&newtbl->walk_head);
62 atomic_set(&newtbl->entries, 0); 63 atomic_set(&newtbl->entries, 0);
63 spin_lock_init(&newtbl->gates_lock); 64 spin_lock_init(&newtbl->gates_lock);
65 spin_lock_init(&newtbl->walk_lock);
64 66
65 return newtbl; 67 return newtbl;
66} 68}
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
249static struct mesh_path * 251static struct mesh_path *
250__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) 252__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
251{ 253{
252 int i = 0, ret; 254 int i = 0;
253 struct mesh_path *mpath = NULL; 255 struct mesh_path *mpath;
254 struct rhashtable_iter iter;
255
256 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
257 if (ret)
258 return NULL;
259
260 rhashtable_walk_start(&iter);
261 256
262 while ((mpath = rhashtable_walk_next(&iter))) { 257 hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
263 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
264 continue;
265 if (IS_ERR(mpath))
266 break;
267 if (i++ == idx) 258 if (i++ == idx)
268 break; 259 break;
269 } 260 }
270 rhashtable_walk_stop(&iter);
271 rhashtable_walk_exit(&iter);
272 261
273 if (IS_ERR(mpath) || !mpath) 262 if (!mpath)
274 return NULL; 263 return NULL;
275 264
276 if (mpath_expired(mpath)) { 265 if (mpath_expired(mpath)) {
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
432 return ERR_PTR(-ENOMEM); 421 return ERR_PTR(-ENOMEM);
433 422
434 tbl = sdata->u.mesh.mesh_paths; 423 tbl = sdata->u.mesh.mesh_paths;
424 spin_lock_bh(&tbl->walk_lock);
435 do { 425 do {
436 ret = rhashtable_lookup_insert_fast(&tbl->rhead, 426 ret = rhashtable_lookup_insert_fast(&tbl->rhead,
437 &new_mpath->rhash, 427 &new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
441 mpath = rhashtable_lookup_fast(&tbl->rhead, 431 mpath = rhashtable_lookup_fast(&tbl->rhead,
442 dst, 432 dst,
443 mesh_rht_params); 433 mesh_rht_params);
444 434 else if (!ret)
435 hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
445 } while (unlikely(ret == -EEXIST && !mpath)); 436 } while (unlikely(ret == -EEXIST && !mpath));
437 spin_unlock_bh(&tbl->walk_lock);
446 438
447 if (ret && ret != -EEXIST) 439 if (ret) {
448 return ERR_PTR(ret);
449
450 /* At this point either new_mpath was added, or we found a
451 * matching entry already in the table; in the latter case
452 * free the unnecessary new entry.
453 */
454 if (ret == -EEXIST) {
455 kfree(new_mpath); 440 kfree(new_mpath);
441
442 if (ret != -EEXIST)
443 return ERR_PTR(ret);
444
456 new_mpath = mpath; 445 new_mpath = mpath;
457 } 446 }
447
458 sdata->u.mesh.mesh_paths_generation++; 448 sdata->u.mesh.mesh_paths_generation++;
459 return new_mpath; 449 return new_mpath;
460} 450}
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
480 470
481 memcpy(new_mpath->mpp, mpp, ETH_ALEN); 471 memcpy(new_mpath->mpp, mpp, ETH_ALEN);
482 tbl = sdata->u.mesh.mpp_paths; 472 tbl = sdata->u.mesh.mpp_paths;
473
474 spin_lock_bh(&tbl->walk_lock);
483 ret = rhashtable_lookup_insert_fast(&tbl->rhead, 475 ret = rhashtable_lookup_insert_fast(&tbl->rhead,
484 &new_mpath->rhash, 476 &new_mpath->rhash,
485 mesh_rht_params); 477 mesh_rht_params);
478 if (!ret)
479 hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
480 spin_unlock_bh(&tbl->walk_lock);
481
482 if (ret)
483 kfree(new_mpath);
486 484
487 sdata->u.mesh.mpp_paths_generation++; 485 sdata->u.mesh.mpp_paths_generation++;
488 return ret; 486 return ret;
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
503 struct mesh_table *tbl = sdata->u.mesh.mesh_paths; 501 struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
504 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 502 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
505 struct mesh_path *mpath; 503 struct mesh_path *mpath;
506 struct rhashtable_iter iter;
507 int ret;
508
509 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
510 if (ret)
511 return;
512 504
513 rhashtable_walk_start(&iter); 505 rcu_read_lock();
514 506 hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
515 while ((mpath = rhashtable_walk_next(&iter))) {
516 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
517 continue;
518 if (IS_ERR(mpath))
519 break;
520 if (rcu_access_pointer(mpath->next_hop) == sta && 507 if (rcu_access_pointer(mpath->next_hop) == sta &&
521 mpath->flags & MESH_PATH_ACTIVE && 508 mpath->flags & MESH_PATH_ACTIVE &&
522 !(mpath->flags & MESH_PATH_FIXED)) { 509 !(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
530 WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); 517 WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
531 } 518 }
532 } 519 }
533 rhashtable_walk_stop(&iter); 520 rcu_read_unlock();
534 rhashtable_walk_exit(&iter);
535} 521}
536 522
537static void mesh_path_free_rcu(struct mesh_table *tbl, 523static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
551 537
552static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) 538static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
553{ 539{
540 hlist_del_rcu(&mpath->walk_list);
554 rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); 541 rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
555 mesh_path_free_rcu(tbl, mpath); 542 mesh_path_free_rcu(tbl, mpath);
556} 543}
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
571 struct ieee80211_sub_if_data *sdata = sta->sdata; 558 struct ieee80211_sub_if_data *sdata = sta->sdata;
572 struct mesh_table *tbl = sdata->u.mesh.mesh_paths; 559 struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
573 struct mesh_path *mpath; 560 struct mesh_path *mpath;
574 struct rhashtable_iter iter; 561 struct hlist_node *n;
575 int ret;
576
577 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
578 if (ret)
579 return;
580
581 rhashtable_walk_start(&iter);
582
583 while ((mpath = rhashtable_walk_next(&iter))) {
584 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
585 continue;
586 if (IS_ERR(mpath))
587 break;
588 562
563 spin_lock_bh(&tbl->walk_lock);
564 hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
589 if (rcu_access_pointer(mpath->next_hop) == sta) 565 if (rcu_access_pointer(mpath->next_hop) == sta)
590 __mesh_path_del(tbl, mpath); 566 __mesh_path_del(tbl, mpath);
591 } 567 }
592 568 spin_unlock_bh(&tbl->walk_lock);
593 rhashtable_walk_stop(&iter);
594 rhashtable_walk_exit(&iter);
595} 569}
596 570
597static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, 571static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
599{ 573{
600 struct mesh_table *tbl = sdata->u.mesh.mpp_paths; 574 struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
601 struct mesh_path *mpath; 575 struct mesh_path *mpath;
602 struct rhashtable_iter iter; 576 struct hlist_node *n;
603 int ret;
604
605 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
606 if (ret)
607 return;
608
609 rhashtable_walk_start(&iter);
610
611 while ((mpath = rhashtable_walk_next(&iter))) {
612 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
613 continue;
614 if (IS_ERR(mpath))
615 break;
616 577
578 spin_lock_bh(&tbl->walk_lock);
579 hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
617 if (ether_addr_equal(mpath->mpp, proxy)) 580 if (ether_addr_equal(mpath->mpp, proxy))
618 __mesh_path_del(tbl, mpath); 581 __mesh_path_del(tbl, mpath);
619 } 582 }
620 583 spin_unlock_bh(&tbl->walk_lock);
621 rhashtable_walk_stop(&iter);
622 rhashtable_walk_exit(&iter);
623} 584}
624 585
625static void table_flush_by_iface(struct mesh_table *tbl) 586static void table_flush_by_iface(struct mesh_table *tbl)
626{ 587{
627 struct mesh_path *mpath; 588 struct mesh_path *mpath;
628 struct rhashtable_iter iter; 589 struct hlist_node *n;
629 int ret;
630
631 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
632 if (ret)
633 return;
634
635 rhashtable_walk_start(&iter);
636 590
637 while ((mpath = rhashtable_walk_next(&iter))) { 591 spin_lock_bh(&tbl->walk_lock);
638 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) 592 hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
639 continue;
640 if (IS_ERR(mpath))
641 break;
642 __mesh_path_del(tbl, mpath); 593 __mesh_path_del(tbl, mpath);
643 } 594 }
644 595 spin_unlock_bh(&tbl->walk_lock);
645 rhashtable_walk_stop(&iter);
646 rhashtable_walk_exit(&iter);
647} 596}
648 597
649/** 598/**
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl,
675{ 624{
676 struct mesh_path *mpath; 625 struct mesh_path *mpath;
677 626
678 rcu_read_lock(); 627 spin_lock_bh(&tbl->walk_lock);
679 mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); 628 mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
680 if (!mpath) { 629 if (!mpath) {
681 rcu_read_unlock(); 630 spin_unlock_bh(&tbl->walk_lock);
682 return -ENXIO; 631 return -ENXIO;
683 } 632 }
684 633
685 __mesh_path_del(tbl, mpath); 634 __mesh_path_del(tbl, mpath);
686 rcu_read_unlock(); 635 spin_unlock_bh(&tbl->walk_lock);
687 return 0; 636 return 0;
688} 637}
689 638
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
854 struct mesh_table *tbl) 803 struct mesh_table *tbl)
855{ 804{
856 struct mesh_path *mpath; 805 struct mesh_path *mpath;
857 struct rhashtable_iter iter; 806 struct hlist_node *n;
858 int ret;
859 807
860 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL); 808 spin_lock_bh(&tbl->walk_lock);
861 if (ret) 809 hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
862 return;
863
864 rhashtable_walk_start(&iter);
865
866 while ((mpath = rhashtable_walk_next(&iter))) {
867 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
868 continue;
869 if (IS_ERR(mpath))
870 break;
871 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 810 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
872 (!(mpath->flags & MESH_PATH_FIXED)) && 811 (!(mpath->flags & MESH_PATH_FIXED)) &&
873 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) 812 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
874 __mesh_path_del(tbl, mpath); 813 __mesh_path_del(tbl, mpath);
875 } 814 }
876 815 spin_unlock_bh(&tbl->walk_lock);
877 rhashtable_walk_stop(&iter);
878 rhashtable_walk_exit(&iter);
879} 816}
880 817
881void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 818void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 45aad3d3108c..c2a6da5d80da 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -231,7 +231,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
231 struct ieee80211_hdr_3addr hdr; 231 struct ieee80211_hdr_3addr hdr;
232 u8 category; 232 u8 category;
233 u8 action_code; 233 u8 action_code;
234 } __packed action; 234 } __packed __aligned(2) action;
235 235
236 if (!sdata) 236 if (!sdata)
237 return; 237 return;
@@ -2644,6 +2644,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2644 struct ieee80211_sub_if_data *sdata = rx->sdata; 2644 struct ieee80211_sub_if_data *sdata = rx->sdata;
2645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2646 u16 ac, q, hdrlen; 2646 u16 ac, q, hdrlen;
2647 int tailroom = 0;
2647 2648
2648 hdr = (struct ieee80211_hdr *) skb->data; 2649 hdr = (struct ieee80211_hdr *) skb->data;
2649 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2650 hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2723,15 +2724,21 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2723 skb_set_queue_mapping(skb, q); 2724 skb_set_queue_mapping(skb, q);
2724 2725
2725 if (!--mesh_hdr->ttl) { 2726 if (!--mesh_hdr->ttl) {
2726 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 2727 if (!is_multicast_ether_addr(hdr->addr1))
2728 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
2729 dropped_frames_ttl);
2727 goto out; 2730 goto out;
2728 } 2731 }
2729 2732
2730 if (!ifmsh->mshcfg.dot11MeshForwarding) 2733 if (!ifmsh->mshcfg.dot11MeshForwarding)
2731 goto out; 2734 goto out;
2732 2735
2736 if (sdata->crypto_tx_tailroom_needed_cnt)
2737 tailroom = IEEE80211_ENCRYPT_TAILROOM;
2738
2733 fwd_skb = skb_copy_expand(skb, local->tx_headroom + 2739 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2734 sdata->encrypt_headroom, 0, GFP_ATOMIC); 2740 sdata->encrypt_headroom,
2741 tailroom, GFP_ATOMIC);
2735 if (!fwd_skb) 2742 if (!fwd_skb)
2736 goto out; 2743 goto out;
2737 2744
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f170d6c6629a..928f13a208b0 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1938 int head_need, bool may_encrypt) 1938 int head_need, bool may_encrypt)
1939{ 1939{
1940 struct ieee80211_local *local = sdata->local; 1940 struct ieee80211_local *local = sdata->local;
1941 struct ieee80211_hdr *hdr;
1942 bool enc_tailroom;
1941 int tail_need = 0; 1943 int tail_need = 0;
1942 1944
1943 if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { 1945 hdr = (struct ieee80211_hdr *) skb->data;
1946 enc_tailroom = may_encrypt &&
1947 (sdata->crypto_tx_tailroom_needed_cnt ||
1948 ieee80211_is_mgmt(hdr->frame_control));
1949
1950 if (enc_tailroom) {
1944 tail_need = IEEE80211_ENCRYPT_TAILROOM; 1951 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1945 tail_need -= skb_tailroom(skb); 1952 tail_need -= skb_tailroom(skb);
1946 tail_need = max_t(int, tail_need, 0); 1953 tail_need = max_t(int, tail_need, 0);
@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1948 1955
1949 if (skb_cloned(skb) && 1956 if (skb_cloned(skb) &&
1950 (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || 1957 (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
1951 !skb_clone_writable(skb, ETH_HLEN) || 1958 !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
1952 (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
1953 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1959 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1954 else if (head_need || tail_need) 1960 else if (head_need || tail_need)
1955 I802_DEBUG_INC(local->tx_expand_skb_head); 1961 I802_DEBUG_INC(local->tx_expand_skb_head);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d0eb38b890aa..ba950ae974fc 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -5,7 +5,7 @@
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright (C) 2015-2017 Intel Deutschland GmbH 7 * Copyright (C) 2015-2017 Intel Deutschland GmbH
8 * Copyright (C) 2018 Intel Corporation 8 * Copyright (C) 2018-2019 Intel Corporation
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -2146,6 +2146,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
2146 case NL80211_IFTYPE_AP_VLAN: 2146 case NL80211_IFTYPE_AP_VLAN:
2147 case NL80211_IFTYPE_MONITOR: 2147 case NL80211_IFTYPE_MONITOR:
2148 break; 2148 break;
2149 case NL80211_IFTYPE_ADHOC:
2150 if (sdata->vif.bss_conf.ibss_joined)
2151 WARN_ON(drv_join_ibss(local, sdata));
2152 /* fall through */
2149 default: 2153 default:
2150 ieee80211_reconfig_stations(sdata); 2154 ieee80211_reconfig_stations(sdata);
2151 /* fall through */ 2155 /* fall through */
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index cad48d07c818..8401cefd9f65 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -29,6 +29,7 @@ config IP_VS_IPV6
29 bool "IPv6 support for IPVS" 29 bool "IPv6 support for IPVS"
30 depends on IPV6 = y || IP_VS = IPV6 30 depends on IPV6 = y || IP_VS = IPV6
31 select IP6_NF_IPTABLES 31 select IP6_NF_IPTABLES
32 select NF_DEFRAG_IPV6
32 ---help--- 33 ---help---
33 Add IPv6 support to IPVS. 34 Add IPv6 support to IPVS.
34 35
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index fe9abf3cc10a..235205c93e14 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1536,14 +1536,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
1536 /* sorry, all this trouble for a no-hit :) */ 1536 /* sorry, all this trouble for a no-hit :) */
1537 IP_VS_DBG_PKT(12, af, pp, skb, iph->off, 1537 IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
1538 "ip_vs_in: packet continues traversal as normal"); 1538 "ip_vs_in: packet continues traversal as normal");
1539 if (iph->fragoffs) { 1539
1540 /* Fragment that couldn't be mapped to a conn entry 1540 /* Fragment couldn't be mapped to a conn entry */
1541 * is missing module nf_defrag_ipv6 1541 if (iph->fragoffs)
1542 */
1543 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1544 IP_VS_DBG_PKT(7, af, pp, skb, iph->off, 1542 IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
1545 "unhandled fragment"); 1543 "unhandled fragment");
1546 } 1544
1547 *verdict = NF_ACCEPT; 1545 *verdict = NF_ACCEPT;
1548 return 0; 1546 return 0;
1549 } 1547 }
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 432141f04af3..ac8d848d7624 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -43,6 +43,7 @@
43#ifdef CONFIG_IP_VS_IPV6 43#ifdef CONFIG_IP_VS_IPV6
44#include <net/ipv6.h> 44#include <net/ipv6.h>
45#include <net/ip6_route.h> 45#include <net/ip6_route.h>
46#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
46#endif 47#endif
47#include <net/route.h> 48#include <net/route.h>
48#include <net/sock.h> 49#include <net/sock.h>
@@ -900,11 +901,17 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
900 901
901#ifdef CONFIG_IP_VS_IPV6 902#ifdef CONFIG_IP_VS_IPV6
902 if (udest->af == AF_INET6) { 903 if (udest->af == AF_INET6) {
904 int ret;
905
903 atype = ipv6_addr_type(&udest->addr.in6); 906 atype = ipv6_addr_type(&udest->addr.in6);
904 if ((!(atype & IPV6_ADDR_UNICAST) || 907 if ((!(atype & IPV6_ADDR_UNICAST) ||
905 atype & IPV6_ADDR_LINKLOCAL) && 908 atype & IPV6_ADDR_LINKLOCAL) &&
906 !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) 909 !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6))
907 return -EINVAL; 910 return -EINVAL;
911
912 ret = nf_defrag_ipv6_enable(svc->ipvs->net);
913 if (ret)
914 return ret;
908 } else 915 } else
909#endif 916#endif
910 { 917 {
@@ -1228,6 +1235,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
1228 ret = -EINVAL; 1235 ret = -EINVAL;
1229 goto out_err; 1236 goto out_err;
1230 } 1237 }
1238
1239 ret = nf_defrag_ipv6_enable(ipvs->net);
1240 if (ret)
1241 goto out_err;
1231 } 1242 }
1232#endif 1243#endif
1233 1244
@@ -2221,6 +2232,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
2221 u->udp_timeout); 2232 u->udp_timeout);
2222 2233
2223#ifdef CONFIG_IP_VS_PROTO_TCP 2234#ifdef CONFIG_IP_VS_PROTO_TCP
2235 if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
2236 u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
2237 return -EINVAL;
2238 }
2239#endif
2240
2241#ifdef CONFIG_IP_VS_PROTO_UDP
2242 if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
2243 return -EINVAL;
2244#endif
2245
2246#ifdef CONFIG_IP_VS_PROTO_TCP
2224 if (u->tcp_timeout) { 2247 if (u->tcp_timeout) {
2225 pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); 2248 pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
2226 pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] 2249 pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 741b533148ba..db4d46332e86 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
1007 } 1007 }
1008 1008
1009 if (nf_ct_key_equal(h, tuple, zone, net)) { 1009 if (nf_ct_key_equal(h, tuple, zone, net)) {
1010 /* Tuple is taken already, so caller will need to find
1011 * a new source port to use.
1012 *
1013 * Only exception:
1014 * If the *original tuples* are identical, then both
1015 * conntracks refer to the same flow.
1016 * This is a rare situation, it can occur e.g. when
1017 * more than one UDP packet is sent from same socket
1018 * in different threads.
1019 *
1020 * Let nf_ct_resolve_clash() deal with this later.
1021 */
1022 if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1023 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
1024 continue;
1025
1010 NF_CT_STAT_INC_ATOMIC(net, found); 1026 NF_CT_STAT_INC_ATOMIC(net, found);
1011 rcu_read_unlock(); 1027 rcu_read_unlock();
1012 return 1; 1028 return 1;
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index fa0844e2a68d..c0c72ae9df42 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
28{ 28{
29 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; 29 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
30 struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; 30 struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
31 struct dst_entry *other_dst = route->tuple[!dir].dst;
31 struct dst_entry *dst = route->tuple[dir].dst; 32 struct dst_entry *dst = route->tuple[dir].dst;
32 33
33 ft->dir = dir; 34 ft->dir = dir;
@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
50 ft->src_port = ctt->src.u.tcp.port; 51 ft->src_port = ctt->src.u.tcp.port;
51 ft->dst_port = ctt->dst.u.tcp.port; 52 ft->dst_port = ctt->dst.u.tcp.port;
52 53
53 ft->iifidx = route->tuple[dir].ifindex; 54 ft->iifidx = other_dst->dev->ifindex;
54 ft->oifidx = route->tuple[!dir].ifindex; 55 ft->oifidx = dst->dev->ifindex;
55 ft->dst_cache = dst; 56 ft->dst_cache = dst;
56} 57}
57 58
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2b0a93300dd7..4893f248dfdc 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
116 kfree(trans); 116 kfree(trans);
117} 117}
118 118
119static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
120{
121 struct net *net = ctx->net;
122 struct nft_trans *trans;
123
124 if (!nft_set_is_anonymous(set))
125 return;
126
127 list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
128 if (trans->msg_type == NFT_MSG_NEWSET &&
129 nft_trans_set(trans) == set) {
130 nft_trans_set_bound(trans) = true;
131 break;
132 }
133 }
134}
135
119static int nf_tables_register_hook(struct net *net, 136static int nf_tables_register_hook(struct net *net,
120 const struct nft_table *table, 137 const struct nft_table *table,
121 struct nft_chain *chain) 138 struct nft_chain *chain)
@@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx)
211 return err; 228 return err;
212} 229}
213 230
214/* either expr ops provide both activate/deactivate, or neither */
215static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
216{
217 if (!ops)
218 return true;
219
220 if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
221 return false;
222
223 return true;
224}
225
226static void nft_rule_expr_activate(const struct nft_ctx *ctx, 231static void nft_rule_expr_activate(const struct nft_ctx *ctx,
227 struct nft_rule *rule) 232 struct nft_rule *rule)
228{ 233{
@@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
238} 243}
239 244
240static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, 245static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
241 struct nft_rule *rule) 246 struct nft_rule *rule,
247 enum nft_trans_phase phase)
242{ 248{
243 struct nft_expr *expr; 249 struct nft_expr *expr;
244 250
245 expr = nft_expr_first(rule); 251 expr = nft_expr_first(rule);
246 while (expr != nft_expr_last(rule) && expr->ops) { 252 while (expr != nft_expr_last(rule) && expr->ops) {
247 if (expr->ops->deactivate) 253 if (expr->ops->deactivate)
248 expr->ops->deactivate(ctx, expr); 254 expr->ops->deactivate(ctx, expr, phase);
249 255
250 expr = nft_expr_next(expr); 256 expr = nft_expr_next(expr);
251 } 257 }
@@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
296 nft_trans_destroy(trans); 302 nft_trans_destroy(trans);
297 return err; 303 return err;
298 } 304 }
299 nft_rule_expr_deactivate(ctx, rule); 305 nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
300 306
301 return 0; 307 return 0;
302} 308}
@@ -307,6 +313,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
307 int err; 313 int err;
308 314
309 list_for_each_entry(rule, &ctx->chain->rules, list) { 315 list_for_each_entry(rule, &ctx->chain->rules, list) {
316 if (!nft_is_active_next(ctx->net, rule))
317 continue;
318
310 err = nft_delrule(ctx, rule); 319 err = nft_delrule(ctx, rule);
311 if (err < 0) 320 if (err < 0)
312 return err; 321 return err;
@@ -1929,9 +1938,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
1929 */ 1938 */
1930int nft_register_expr(struct nft_expr_type *type) 1939int nft_register_expr(struct nft_expr_type *type)
1931{ 1940{
1932 if (!nft_expr_check_ops(type->ops))
1933 return -EINVAL;
1934
1935 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1941 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1936 if (type->family == NFPROTO_UNSPEC) 1942 if (type->family == NFPROTO_UNSPEC)
1937 list_add_tail_rcu(&type->list, &nf_tables_expressions); 1943 list_add_tail_rcu(&type->list, &nf_tables_expressions);
@@ -2079,10 +2085,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
2079 err = PTR_ERR(ops); 2085 err = PTR_ERR(ops);
2080 goto err1; 2086 goto err1;
2081 } 2087 }
2082 if (!nft_expr_check_ops(ops)) {
2083 err = -EINVAL;
2084 goto err1;
2085 }
2086 } else 2088 } else
2087 ops = type->ops; 2089 ops = type->ops;
2088 2090
@@ -2304,7 +2306,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
2304 struct net *net = sock_net(skb->sk); 2306 struct net *net = sock_net(skb->sk);
2305 unsigned int s_idx = cb->args[0]; 2307 unsigned int s_idx = cb->args[0];
2306 const struct nft_rule *rule; 2308 const struct nft_rule *rule;
2307 int rc = 1;
2308 2309
2309 list_for_each_entry_rcu(rule, &chain->rules, list) { 2310 list_for_each_entry_rcu(rule, &chain->rules, list) {
2310 if (!nft_is_active(net, rule)) 2311 if (!nft_is_active(net, rule))
@@ -2321,16 +2322,13 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
2321 NLM_F_MULTI | NLM_F_APPEND, 2322 NLM_F_MULTI | NLM_F_APPEND,
2322 table->family, 2323 table->family,
2323 table, chain, rule) < 0) 2324 table, chain, rule) < 0)
2324 goto out_unfinished; 2325 return 1;
2325 2326
2326 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2327 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2327cont: 2328cont:
2328 (*idx)++; 2329 (*idx)++;
2329 } 2330 }
2330 rc = 0; 2331 return 0;
2331out_unfinished:
2332 cb->args[0] = *idx;
2333 return rc;
2334} 2332}
2335 2333
2336static int nf_tables_dump_rules(struct sk_buff *skb, 2334static int nf_tables_dump_rules(struct sk_buff *skb,
@@ -2354,7 +2352,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
2354 if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) 2352 if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
2355 continue; 2353 continue;
2356 2354
2357 if (ctx && ctx->chain) { 2355 if (ctx && ctx->table && ctx->chain) {
2358 struct rhlist_head *list, *tmp; 2356 struct rhlist_head *list, *tmp;
2359 2357
2360 list = rhltable_lookup(&table->chains_ht, ctx->chain, 2358 list = rhltable_lookup(&table->chains_ht, ctx->chain,
@@ -2382,6 +2380,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
2382 } 2380 }
2383done: 2381done:
2384 rcu_read_unlock(); 2382 rcu_read_unlock();
2383
2384 cb->args[0] = idx;
2385 return skb->len; 2385 return skb->len;
2386} 2386}
2387 2387
@@ -2513,7 +2513,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2513static void nf_tables_rule_release(const struct nft_ctx *ctx, 2513static void nf_tables_rule_release(const struct nft_ctx *ctx,
2514 struct nft_rule *rule) 2514 struct nft_rule *rule)
2515{ 2515{
2516 nft_rule_expr_deactivate(ctx, rule); 2516 nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
2517 nf_tables_rule_destroy(ctx, rule); 2517 nf_tables_rule_destroy(ctx, rule);
2518} 2518}
2519 2519
@@ -3710,39 +3710,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
3710bind: 3710bind:
3711 binding->chain = ctx->chain; 3711 binding->chain = ctx->chain;
3712 list_add_tail_rcu(&binding->list, &set->bindings); 3712 list_add_tail_rcu(&binding->list, &set->bindings);
3713 nft_set_trans_bind(ctx, set);
3714
3713 return 0; 3715 return 0;
3714} 3716}
3715EXPORT_SYMBOL_GPL(nf_tables_bind_set); 3717EXPORT_SYMBOL_GPL(nf_tables_bind_set);
3716 3718
3717void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
3718 struct nft_set_binding *binding)
3719{
3720 if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
3721 nft_is_active(ctx->net, set))
3722 list_add_tail_rcu(&set->list, &ctx->table->sets);
3723
3724 list_add_tail_rcu(&binding->list, &set->bindings);
3725}
3726EXPORT_SYMBOL_GPL(nf_tables_rebind_set);
3727
3728void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 3719void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
3729 struct nft_set_binding *binding) 3720 struct nft_set_binding *binding, bool event)
3730{ 3721{
3731 list_del_rcu(&binding->list); 3722 list_del_rcu(&binding->list);
3732 3723
3733 if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && 3724 if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
3734 nft_is_active(ctx->net, set))
3735 list_del_rcu(&set->list); 3725 list_del_rcu(&set->list);
3726 if (event)
3727 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
3728 GFP_KERNEL);
3729 }
3736} 3730}
3737EXPORT_SYMBOL_GPL(nf_tables_unbind_set); 3731EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
3738 3732
3739void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) 3733void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
3740{ 3734{
3741 if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && 3735 if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
3742 nft_is_active(ctx->net, set)) {
3743 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
3744 nft_set_destroy(set); 3736 nft_set_destroy(set);
3745 }
3746} 3737}
3747EXPORT_SYMBOL_GPL(nf_tables_destroy_set); 3738EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
3748 3739
@@ -4508,6 +4499,8 @@ err6:
4508err5: 4499err5:
4509 kfree(trans); 4500 kfree(trans);
4510err4: 4501err4:
4502 if (obj)
4503 obj->use--;
4511 kfree(elem.priv); 4504 kfree(elem.priv);
4512err3: 4505err3:
4513 if (nla[NFTA_SET_ELEM_DATA] != NULL) 4506 if (nla[NFTA_SET_ELEM_DATA] != NULL)
@@ -6535,6 +6528,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
6535 nf_tables_rule_notify(&trans->ctx, 6528 nf_tables_rule_notify(&trans->ctx,
6536 nft_trans_rule(trans), 6529 nft_trans_rule(trans),
6537 NFT_MSG_DELRULE); 6530 NFT_MSG_DELRULE);
6531 nft_rule_expr_deactivate(&trans->ctx,
6532 nft_trans_rule(trans),
6533 NFT_TRANS_COMMIT);
6538 break; 6534 break;
6539 case NFT_MSG_NEWSET: 6535 case NFT_MSG_NEWSET:
6540 nft_clear(net, nft_trans_set(trans)); 6536 nft_clear(net, nft_trans_set(trans));
@@ -6621,7 +6617,8 @@ static void nf_tables_abort_release(struct nft_trans *trans)
6621 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); 6617 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
6622 break; 6618 break;
6623 case NFT_MSG_NEWSET: 6619 case NFT_MSG_NEWSET:
6624 nft_set_destroy(nft_trans_set(trans)); 6620 if (!nft_trans_set_bound(trans))
6621 nft_set_destroy(nft_trans_set(trans));
6625 break; 6622 break;
6626 case NFT_MSG_NEWSETELEM: 6623 case NFT_MSG_NEWSETELEM:
6627 nft_set_elem_destroy(nft_trans_elem_set(trans), 6624 nft_set_elem_destroy(nft_trans_elem_set(trans),
@@ -6682,7 +6679,9 @@ static int __nf_tables_abort(struct net *net)
6682 case NFT_MSG_NEWRULE: 6679 case NFT_MSG_NEWRULE:
6683 trans->ctx.chain->use--; 6680 trans->ctx.chain->use--;
6684 list_del_rcu(&nft_trans_rule(trans)->list); 6681 list_del_rcu(&nft_trans_rule(trans)->list);
6685 nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans)); 6682 nft_rule_expr_deactivate(&trans->ctx,
6683 nft_trans_rule(trans),
6684 NFT_TRANS_ABORT);
6686 break; 6685 break;
6687 case NFT_MSG_DELRULE: 6686 case NFT_MSG_DELRULE:
6688 trans->ctx.chain->use++; 6687 trans->ctx.chain->use++;
@@ -6692,7 +6691,8 @@ static int __nf_tables_abort(struct net *net)
6692 break; 6691 break;
6693 case NFT_MSG_NEWSET: 6692 case NFT_MSG_NEWSET:
6694 trans->ctx.table->use--; 6693 trans->ctx.table->use--;
6695 list_del_rcu(&nft_trans_set(trans)->list); 6694 if (!nft_trans_set_bound(trans))
6695 list_del_rcu(&nft_trans_set(trans)->list);
6696 break; 6696 break;
6697 case NFT_MSG_DELSET: 6697 case NFT_MSG_DELSET:
6698 trans->ctx.table->use++; 6698 trans->ctx.table->use++;
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 6f41dd74729d..1f1d90c1716b 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -66,6 +66,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
66 int ttl_check, 66 int ttl_check,
67 struct nf_osf_hdr_ctx *ctx) 67 struct nf_osf_hdr_ctx *ctx)
68{ 68{
69 const __u8 *optpinit = ctx->optp;
69 unsigned int check_WSS = 0; 70 unsigned int check_WSS = 0;
70 int fmatch = FMATCH_WRONG; 71 int fmatch = FMATCH_WRONG;
71 int foptsize, optnum; 72 int foptsize, optnum;
@@ -155,6 +156,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
155 } 156 }
156 } 157 }
157 158
159 if (fmatch != FMATCH_OK)
160 ctx->optp = optpinit;
161
158 return fmatch == FMATCH_OK; 162 return fmatch == FMATCH_OK;
159} 163}
160 164
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 7334e0b80a5e..0a4bad55a8aa 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -22,11 +22,15 @@
22#include <linux/netfilter_bridge/ebtables.h> 22#include <linux/netfilter_bridge/ebtables.h>
23#include <linux/netfilter_arp/arp_tables.h> 23#include <linux/netfilter_arp/arp_tables.h>
24#include <net/netfilter/nf_tables.h> 24#include <net/netfilter/nf_tables.h>
25#include <net/netns/generic.h>
25 26
26struct nft_xt { 27struct nft_xt {
27 struct list_head head; 28 struct list_head head;
28 struct nft_expr_ops ops; 29 struct nft_expr_ops ops;
29 unsigned int refcnt; 30 refcount_t refcnt;
31
32 /* used only when transaction mutex is locked */
33 unsigned int listcnt;
30 34
31 /* Unlike other expressions, ops doesn't have static storage duration. 35 /* Unlike other expressions, ops doesn't have static storage duration.
32 * nft core assumes they do. We use kfree_rcu so that nft core can 36 * nft core assumes they do. We use kfree_rcu so that nft core can
@@ -43,10 +47,39 @@ struct nft_xt_match_priv {
43 void *info; 47 void *info;
44}; 48};
45 49
50struct nft_compat_net {
51 struct list_head nft_target_list;
52 struct list_head nft_match_list;
53};
54
55static unsigned int nft_compat_net_id __read_mostly;
56static struct nft_expr_type nft_match_type;
57static struct nft_expr_type nft_target_type;
58
59static struct nft_compat_net *nft_compat_pernet(struct net *net)
60{
61 return net_generic(net, nft_compat_net_id);
62}
63
64static void nft_xt_get(struct nft_xt *xt)
65{
66 /* refcount_inc() warns on 0 -> 1 transition, but we can't
67 * init the reference count to 1 in .select_ops -- we can't
68 * undo such an increase when another expression inside the same
69 * rule fails afterwards.
70 */
71 if (xt->listcnt == 0)
72 refcount_set(&xt->refcnt, 1);
73 else
74 refcount_inc(&xt->refcnt);
75
76 xt->listcnt++;
77}
78
46static bool nft_xt_put(struct nft_xt *xt) 79static bool nft_xt_put(struct nft_xt *xt)
47{ 80{
48 if (--xt->refcnt == 0) { 81 if (refcount_dec_and_test(&xt->refcnt)) {
49 list_del(&xt->head); 82 WARN_ON_ONCE(!list_empty(&xt->head));
50 kfree_rcu(xt, rcu_head); 83 kfree_rcu(xt, rcu_head);
51 return true; 84 return true;
52 } 85 }
@@ -273,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
273 return -EINVAL; 306 return -EINVAL;
274 307
275 nft_xt = container_of(expr->ops, struct nft_xt, ops); 308 nft_xt = container_of(expr->ops, struct nft_xt, ops);
276 nft_xt->refcnt++; 309 nft_xt_get(nft_xt);
277 return 0; 310 return 0;
278} 311}
279 312
@@ -282,6 +315,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
282{ 315{
283 struct xt_target *target = expr->ops->data; 316 struct xt_target *target = expr->ops->data;
284 void *info = nft_expr_priv(expr); 317 void *info = nft_expr_priv(expr);
318 struct module *me = target->me;
285 struct xt_tgdtor_param par; 319 struct xt_tgdtor_param par;
286 320
287 par.net = ctx->net; 321 par.net = ctx->net;
@@ -292,7 +326,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
292 par.target->destroy(&par); 326 par.target->destroy(&par);
293 327
294 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) 328 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
295 module_put(target->me); 329 module_put(me);
296} 330}
297 331
298static int nft_extension_dump_info(struct sk_buff *skb, int attr, 332static int nft_extension_dump_info(struct sk_buff *skb, int attr,
@@ -486,7 +520,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
486 return ret; 520 return ret;
487 521
488 nft_xt = container_of(expr->ops, struct nft_xt, ops); 522 nft_xt = container_of(expr->ops, struct nft_xt, ops);
489 nft_xt->refcnt++; 523 nft_xt_get(nft_xt);
490 return 0; 524 return 0;
491} 525}
492 526
@@ -540,6 +574,18 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
540 __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); 574 __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
541} 575}
542 576
577static void nft_compat_deactivate(const struct nft_ctx *ctx,
578 const struct nft_expr *expr,
579 enum nft_trans_phase phase)
580{
581 struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
582
583 if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) {
584 if (--xt->listcnt == 0)
585 list_del_init(&xt->head);
586 }
587}
588
543static void 589static void
544nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 590nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
545{ 591{
@@ -734,10 +780,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
734 .cb = nfnl_nft_compat_cb, 780 .cb = nfnl_nft_compat_cb,
735}; 781};
736 782
737static LIST_HEAD(nft_match_list);
738
739static struct nft_expr_type nft_match_type;
740
741static bool nft_match_cmp(const struct xt_match *match, 783static bool nft_match_cmp(const struct xt_match *match,
742 const char *name, u32 rev, u32 family) 784 const char *name, u32 rev, u32 family)
743{ 785{
@@ -749,6 +791,7 @@ static const struct nft_expr_ops *
749nft_match_select_ops(const struct nft_ctx *ctx, 791nft_match_select_ops(const struct nft_ctx *ctx,
750 const struct nlattr * const tb[]) 792 const struct nlattr * const tb[])
751{ 793{
794 struct nft_compat_net *cn;
752 struct nft_xt *nft_match; 795 struct nft_xt *nft_match;
753 struct xt_match *match; 796 struct xt_match *match;
754 unsigned int matchsize; 797 unsigned int matchsize;
@@ -765,8 +808,10 @@ nft_match_select_ops(const struct nft_ctx *ctx,
765 rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV])); 808 rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
766 family = ctx->family; 809 family = ctx->family;
767 810
811 cn = nft_compat_pernet(ctx->net);
812
768 /* Re-use the existing match if it's already loaded. */ 813 /* Re-use the existing match if it's already loaded. */
769 list_for_each_entry(nft_match, &nft_match_list, head) { 814 list_for_each_entry(nft_match, &cn->nft_match_list, head) {
770 struct xt_match *match = nft_match->ops.data; 815 struct xt_match *match = nft_match->ops.data;
771 816
772 if (nft_match_cmp(match, mt_name, rev, family)) 817 if (nft_match_cmp(match, mt_name, rev, family))
@@ -789,11 +834,12 @@ nft_match_select_ops(const struct nft_ctx *ctx,
789 goto err; 834 goto err;
790 } 835 }
791 836
792 nft_match->refcnt = 0; 837 refcount_set(&nft_match->refcnt, 0);
793 nft_match->ops.type = &nft_match_type; 838 nft_match->ops.type = &nft_match_type;
794 nft_match->ops.eval = nft_match_eval; 839 nft_match->ops.eval = nft_match_eval;
795 nft_match->ops.init = nft_match_init; 840 nft_match->ops.init = nft_match_init;
796 nft_match->ops.destroy = nft_match_destroy; 841 nft_match->ops.destroy = nft_match_destroy;
842 nft_match->ops.deactivate = nft_compat_deactivate;
797 nft_match->ops.dump = nft_match_dump; 843 nft_match->ops.dump = nft_match_dump;
798 nft_match->ops.validate = nft_match_validate; 844 nft_match->ops.validate = nft_match_validate;
799 nft_match->ops.data = match; 845 nft_match->ops.data = match;
@@ -810,7 +856,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
810 856
811 nft_match->ops.size = matchsize; 857 nft_match->ops.size = matchsize;
812 858
813 list_add(&nft_match->head, &nft_match_list); 859 nft_match->listcnt = 0;
860 list_add(&nft_match->head, &cn->nft_match_list);
814 861
815 return &nft_match->ops; 862 return &nft_match->ops;
816err: 863err:
@@ -826,10 +873,6 @@ static struct nft_expr_type nft_match_type __read_mostly = {
826 .owner = THIS_MODULE, 873 .owner = THIS_MODULE,
827}; 874};
828 875
829static LIST_HEAD(nft_target_list);
830
831static struct nft_expr_type nft_target_type;
832
833static bool nft_target_cmp(const struct xt_target *tg, 876static bool nft_target_cmp(const struct xt_target *tg,
834 const char *name, u32 rev, u32 family) 877 const char *name, u32 rev, u32 family)
835{ 878{
@@ -841,6 +884,7 @@ static const struct nft_expr_ops *
841nft_target_select_ops(const struct nft_ctx *ctx, 884nft_target_select_ops(const struct nft_ctx *ctx,
842 const struct nlattr * const tb[]) 885 const struct nlattr * const tb[])
843{ 886{
887 struct nft_compat_net *cn;
844 struct nft_xt *nft_target; 888 struct nft_xt *nft_target;
845 struct xt_target *target; 889 struct xt_target *target;
846 char *tg_name; 890 char *tg_name;
@@ -861,8 +905,9 @@ nft_target_select_ops(const struct nft_ctx *ctx,
861 strcmp(tg_name, "standard") == 0) 905 strcmp(tg_name, "standard") == 0)
862 return ERR_PTR(-EINVAL); 906 return ERR_PTR(-EINVAL);
863 907
908 cn = nft_compat_pernet(ctx->net);
864 /* Re-use the existing target if it's already loaded. */ 909 /* Re-use the existing target if it's already loaded. */
865 list_for_each_entry(nft_target, &nft_target_list, head) { 910 list_for_each_entry(nft_target, &cn->nft_target_list, head) {
866 struct xt_target *target = nft_target->ops.data; 911 struct xt_target *target = nft_target->ops.data;
867 912
868 if (!target->target) 913 if (!target->target)
@@ -893,11 +938,12 @@ nft_target_select_ops(const struct nft_ctx *ctx,
893 goto err; 938 goto err;
894 } 939 }
895 940
896 nft_target->refcnt = 0; 941 refcount_set(&nft_target->refcnt, 0);
897 nft_target->ops.type = &nft_target_type; 942 nft_target->ops.type = &nft_target_type;
898 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); 943 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
899 nft_target->ops.init = nft_target_init; 944 nft_target->ops.init = nft_target_init;
900 nft_target->ops.destroy = nft_target_destroy; 945 nft_target->ops.destroy = nft_target_destroy;
946 nft_target->ops.deactivate = nft_compat_deactivate;
901 nft_target->ops.dump = nft_target_dump; 947 nft_target->ops.dump = nft_target_dump;
902 nft_target->ops.validate = nft_target_validate; 948 nft_target->ops.validate = nft_target_validate;
903 nft_target->ops.data = target; 949 nft_target->ops.data = target;
@@ -907,7 +953,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
907 else 953 else
908 nft_target->ops.eval = nft_target_eval_xt; 954 nft_target->ops.eval = nft_target_eval_xt;
909 955
910 list_add(&nft_target->head, &nft_target_list); 956 nft_target->listcnt = 0;
957 list_add(&nft_target->head, &cn->nft_target_list);
911 958
912 return &nft_target->ops; 959 return &nft_target->ops;
913err: 960err:
@@ -923,13 +970,74 @@ static struct nft_expr_type nft_target_type __read_mostly = {
923 .owner = THIS_MODULE, 970 .owner = THIS_MODULE,
924}; 971};
925 972
973static int __net_init nft_compat_init_net(struct net *net)
974{
975 struct nft_compat_net *cn = nft_compat_pernet(net);
976
977 INIT_LIST_HEAD(&cn->nft_target_list);
978 INIT_LIST_HEAD(&cn->nft_match_list);
979
980 return 0;
981}
982
983static void __net_exit nft_compat_exit_net(struct net *net)
984{
985 struct nft_compat_net *cn = nft_compat_pernet(net);
986 struct nft_xt *xt, *next;
987
988 if (list_empty(&cn->nft_match_list) &&
989 list_empty(&cn->nft_target_list))
990 return;
991
992 /* If there was an error that caused nft_xt expr to not be initialized
993 * fully and noone else requested the same expression later, the lists
994 * contain 0-refcount entries that still hold module reference.
995 *
996 * Clean them here.
997 */
998 mutex_lock(&net->nft.commit_mutex);
999 list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) {
1000 struct xt_target *target = xt->ops.data;
1001
1002 list_del_init(&xt->head);
1003
1004 if (refcount_read(&xt->refcnt))
1005 continue;
1006 module_put(target->me);
1007 kfree(xt);
1008 }
1009
1010 list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) {
1011 struct xt_match *match = xt->ops.data;
1012
1013 list_del_init(&xt->head);
1014
1015 if (refcount_read(&xt->refcnt))
1016 continue;
1017 module_put(match->me);
1018 kfree(xt);
1019 }
1020 mutex_unlock(&net->nft.commit_mutex);
1021}
1022
1023static struct pernet_operations nft_compat_net_ops = {
1024 .init = nft_compat_init_net,
1025 .exit = nft_compat_exit_net,
1026 .id = &nft_compat_net_id,
1027 .size = sizeof(struct nft_compat_net),
1028};
1029
926static int __init nft_compat_module_init(void) 1030static int __init nft_compat_module_init(void)
927{ 1031{
928 int ret; 1032 int ret;
929 1033
1034 ret = register_pernet_subsys(&nft_compat_net_ops);
1035 if (ret < 0)
1036 goto err_target;
1037
930 ret = nft_register_expr(&nft_match_type); 1038 ret = nft_register_expr(&nft_match_type);
931 if (ret < 0) 1039 if (ret < 0)
932 return ret; 1040 goto err_pernet;
933 1041
934 ret = nft_register_expr(&nft_target_type); 1042 ret = nft_register_expr(&nft_target_type);
935 if (ret < 0) 1043 if (ret < 0)
@@ -942,45 +1050,21 @@ static int __init nft_compat_module_init(void)
942 } 1050 }
943 1051
944 return ret; 1052 return ret;
945
946err_target: 1053err_target:
947 nft_unregister_expr(&nft_target_type); 1054 nft_unregister_expr(&nft_target_type);
948err_match: 1055err_match:
949 nft_unregister_expr(&nft_match_type); 1056 nft_unregister_expr(&nft_match_type);
1057err_pernet:
1058 unregister_pernet_subsys(&nft_compat_net_ops);
950 return ret; 1059 return ret;
951} 1060}
952 1061
953static void __exit nft_compat_module_exit(void) 1062static void __exit nft_compat_module_exit(void)
954{ 1063{
955 struct nft_xt *xt, *next;
956
957 /* list should be empty here, it can be non-empty only in case there
958 * was an error that caused nft_xt expr to not be initialized fully
959 * and noone else requested the same expression later.
960 *
961 * In this case, the lists contain 0-refcount entries that still
962 * hold module reference.
963 */
964 list_for_each_entry_safe(xt, next, &nft_target_list, head) {
965 struct xt_target *target = xt->ops.data;
966
967 if (WARN_ON_ONCE(xt->refcnt))
968 continue;
969 module_put(target->me);
970 kfree(xt);
971 }
972
973 list_for_each_entry_safe(xt, next, &nft_match_list, head) {
974 struct xt_match *match = xt->ops.data;
975
976 if (WARN_ON_ONCE(xt->refcnt))
977 continue;
978 module_put(match->me);
979 kfree(xt);
980 }
981 nfnetlink_subsys_unregister(&nfnl_compat_subsys); 1064 nfnetlink_subsys_unregister(&nfnl_compat_subsys);
982 nft_unregister_expr(&nft_target_type); 1065 nft_unregister_expr(&nft_target_type);
983 nft_unregister_expr(&nft_match_type); 1066 nft_unregister_expr(&nft_match_type);
1067 unregister_pernet_subsys(&nft_compat_net_ops);
984} 1068}
985 1069
986MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT); 1070MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 07d4efd3d851..f1172f99752b 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -235,20 +235,17 @@ err1:
235 return err; 235 return err;
236} 236}
237 237
238static void nft_dynset_activate(const struct nft_ctx *ctx,
239 const struct nft_expr *expr)
240{
241 struct nft_dynset *priv = nft_expr_priv(expr);
242
243 nf_tables_rebind_set(ctx, priv->set, &priv->binding);
244}
245
246static void nft_dynset_deactivate(const struct nft_ctx *ctx, 238static void nft_dynset_deactivate(const struct nft_ctx *ctx,
247 const struct nft_expr *expr) 239 const struct nft_expr *expr,
240 enum nft_trans_phase phase)
248{ 241{
249 struct nft_dynset *priv = nft_expr_priv(expr); 242 struct nft_dynset *priv = nft_expr_priv(expr);
250 243
251 nf_tables_unbind_set(ctx, priv->set, &priv->binding); 244 if (phase == NFT_TRANS_PREPARE)
245 return;
246
247 nf_tables_unbind_set(ctx, priv->set, &priv->binding,
248 phase == NFT_TRANS_COMMIT);
252} 249}
253 250
254static void nft_dynset_destroy(const struct nft_ctx *ctx, 251static void nft_dynset_destroy(const struct nft_ctx *ctx,
@@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = {
296 .eval = nft_dynset_eval, 293 .eval = nft_dynset_eval,
297 .init = nft_dynset_init, 294 .init = nft_dynset_init,
298 .destroy = nft_dynset_destroy, 295 .destroy = nft_dynset_destroy,
299 .activate = nft_dynset_activate,
300 .deactivate = nft_dynset_deactivate, 296 .deactivate = nft_dynset_deactivate,
301 .dump = nft_dynset_dump, 297 .dump = nft_dynset_dump,
302}; 298};
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 974525eb92df..6e6b9adf7d38 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -12,6 +12,7 @@
12#include <net/netfilter/nf_conntrack_core.h> 12#include <net/netfilter/nf_conntrack_core.h>
13#include <linux/netfilter/nf_conntrack_common.h> 13#include <linux/netfilter/nf_conntrack_common.h>
14#include <net/netfilter/nf_flow_table.h> 14#include <net/netfilter/nf_flow_table.h>
15#include <net/netfilter/nf_conntrack_helper.h>
15 16
16struct nft_flow_offload { 17struct nft_flow_offload {
17 struct nft_flowtable *flowtable; 18 struct nft_flowtable *flowtable;
@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
29 memset(&fl, 0, sizeof(fl)); 30 memset(&fl, 0, sizeof(fl));
30 switch (nft_pf(pkt)) { 31 switch (nft_pf(pkt)) {
31 case NFPROTO_IPV4: 32 case NFPROTO_IPV4:
32 fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip; 33 fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
34 fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
33 break; 35 break;
34 case NFPROTO_IPV6: 36 case NFPROTO_IPV6:
35 fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6; 37 fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
38 fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
36 break; 39 break;
37 } 40 }
38 41
@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
41 return -ENOENT; 44 return -ENOENT;
42 45
43 route->tuple[dir].dst = this_dst; 46 route->tuple[dir].dst = this_dst;
44 route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
45 route->tuple[!dir].dst = other_dst; 47 route->tuple[!dir].dst = other_dst;
46 route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
47 48
48 return 0; 49 return 0;
49} 50}
@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
66{ 67{
67 struct nft_flow_offload *priv = nft_expr_priv(expr); 68 struct nft_flow_offload *priv = nft_expr_priv(expr);
68 struct nf_flowtable *flowtable = &priv->flowtable->data; 69 struct nf_flowtable *flowtable = &priv->flowtable->data;
70 const struct nf_conn_help *help;
69 enum ip_conntrack_info ctinfo; 71 enum ip_conntrack_info ctinfo;
70 struct nf_flow_route route; 72 struct nf_flow_route route;
71 struct flow_offload *flow; 73 struct flow_offload *flow;
@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
88 goto out; 90 goto out;
89 } 91 }
90 92
91 if (test_bit(IPS_HELPER_BIT, &ct->status)) 93 help = nfct_help(ct);
94 if (help)
92 goto out; 95 goto out;
93 96
94 if (ctinfo == IP_CT_NEW || 97 if (ctinfo == IP_CT_NEW ||
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 0777a93211e2..3f6d1d2a6281 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
72} 72}
73 73
74static void nft_immediate_deactivate(const struct nft_ctx *ctx, 74static void nft_immediate_deactivate(const struct nft_ctx *ctx,
75 const struct nft_expr *expr) 75 const struct nft_expr *expr,
76 enum nft_trans_phase phase)
76{ 77{
77 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 78 const struct nft_immediate_expr *priv = nft_expr_priv(expr);
78 79
80 if (phase == NFT_TRANS_COMMIT)
81 return;
82
79 return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); 83 return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
80} 84}
81 85
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 227b2b15a19c..14496da5141d 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
121 return 0; 121 return 0;
122} 122}
123 123
124static void nft_lookup_activate(const struct nft_ctx *ctx,
125 const struct nft_expr *expr)
126{
127 struct nft_lookup *priv = nft_expr_priv(expr);
128
129 nf_tables_rebind_set(ctx, priv->set, &priv->binding);
130}
131
132static void nft_lookup_deactivate(const struct nft_ctx *ctx, 124static void nft_lookup_deactivate(const struct nft_ctx *ctx,
133 const struct nft_expr *expr) 125 const struct nft_expr *expr,
126 enum nft_trans_phase phase)
134{ 127{
135 struct nft_lookup *priv = nft_expr_priv(expr); 128 struct nft_lookup *priv = nft_expr_priv(expr);
136 129
137 nf_tables_unbind_set(ctx, priv->set, &priv->binding); 130 if (phase == NFT_TRANS_PREPARE)
131 return;
132
133 nf_tables_unbind_set(ctx, priv->set, &priv->binding,
134 phase == NFT_TRANS_COMMIT);
138} 135}
139 136
140static void nft_lookup_destroy(const struct nft_ctx *ctx, 137static void nft_lookup_destroy(const struct nft_ctx *ctx,
@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = {
225 .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), 222 .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
226 .eval = nft_lookup_eval, 223 .eval = nft_lookup_eval,
227 .init = nft_lookup_init, 224 .init = nft_lookup_init,
228 .activate = nft_lookup_activate,
229 .deactivate = nft_lookup_deactivate, 225 .deactivate = nft_lookup_deactivate,
230 .destroy = nft_lookup_destroy, 226 .destroy = nft_lookup_destroy,
231 .dump = nft_lookup_dump, 227 .dump = nft_lookup_dump,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index a3185ca2a3a9..ae178e914486 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -155,20 +155,17 @@ nla_put_failure:
155 return -1; 155 return -1;
156} 156}
157 157
158static void nft_objref_map_activate(const struct nft_ctx *ctx,
159 const struct nft_expr *expr)
160{
161 struct nft_objref_map *priv = nft_expr_priv(expr);
162
163 nf_tables_rebind_set(ctx, priv->set, &priv->binding);
164}
165
166static void nft_objref_map_deactivate(const struct nft_ctx *ctx, 158static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
167 const struct nft_expr *expr) 159 const struct nft_expr *expr,
160 enum nft_trans_phase phase)
168{ 161{
169 struct nft_objref_map *priv = nft_expr_priv(expr); 162 struct nft_objref_map *priv = nft_expr_priv(expr);
170 163
171 nf_tables_unbind_set(ctx, priv->set, &priv->binding); 164 if (phase == NFT_TRANS_PREPARE)
165 return;
166
167 nf_tables_unbind_set(ctx, priv->set, &priv->binding,
168 phase == NFT_TRANS_COMMIT);
172} 169}
173 170
174static void nft_objref_map_destroy(const struct nft_ctx *ctx, 171static void nft_objref_map_destroy(const struct nft_ctx *ctx,
@@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = {
185 .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), 182 .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
186 .eval = nft_objref_map_eval, 183 .eval = nft_objref_map_eval,
187 .init = nft_objref_map_init, 184 .init = nft_objref_map_init,
188 .activate = nft_objref_map_activate,
189 .deactivate = nft_objref_map_deactivate, 185 .deactivate = nft_objref_map_deactivate,
190 .destroy = nft_objref_map_destroy, 186 .destroy = nft_objref_map_destroy,
191 .dump = nft_objref_map_dump, 187 .dump = nft_objref_map_dump,
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index aecadd471e1d..13e1ac333fa4 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1899,7 +1899,7 @@ static int __init xt_init(void)
1899 seqcount_init(&per_cpu(xt_recseq, i)); 1899 seqcount_init(&per_cpu(xt_recseq, i));
1900 } 1900 }
1901 1901
1902 xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); 1902 xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1903 if (!xt) 1903 if (!xt)
1904 return -ENOMEM; 1904 return -ENOMEM;
1905 1905
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index cbd51ed5a2d7..908e53ab47a4 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -52,21 +52,21 @@ void nr_start_t1timer(struct sock *sk)
52{ 52{
53 struct nr_sock *nr = nr_sk(sk); 53 struct nr_sock *nr = nr_sk(sk);
54 54
55 mod_timer(&nr->t1timer, jiffies + nr->t1); 55 sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
56} 56}
57 57
58void nr_start_t2timer(struct sock *sk) 58void nr_start_t2timer(struct sock *sk)
59{ 59{
60 struct nr_sock *nr = nr_sk(sk); 60 struct nr_sock *nr = nr_sk(sk);
61 61
62 mod_timer(&nr->t2timer, jiffies + nr->t2); 62 sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
63} 63}
64 64
65void nr_start_t4timer(struct sock *sk) 65void nr_start_t4timer(struct sock *sk)
66{ 66{
67 struct nr_sock *nr = nr_sk(sk); 67 struct nr_sock *nr = nr_sk(sk);
68 68
69 mod_timer(&nr->t4timer, jiffies + nr->t4); 69 sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
70} 70}
71 71
72void nr_start_idletimer(struct sock *sk) 72void nr_start_idletimer(struct sock *sk)
@@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk)
74 struct nr_sock *nr = nr_sk(sk); 74 struct nr_sock *nr = nr_sk(sk);
75 75
76 if (nr->idle > 0) 76 if (nr->idle > 0)
77 mod_timer(&nr->idletimer, jiffies + nr->idle); 77 sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
78} 78}
79 79
80void nr_start_heartbeat(struct sock *sk) 80void nr_start_heartbeat(struct sock *sk)
81{ 81{
82 mod_timer(&sk->sk_timer, jiffies + 5 * HZ); 82 sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
83} 83}
84 84
85void nr_stop_t1timer(struct sock *sk) 85void nr_stop_t1timer(struct sock *sk)
86{ 86{
87 del_timer(&nr_sk(sk)->t1timer); 87 sk_stop_timer(sk, &nr_sk(sk)->t1timer);
88} 88}
89 89
90void nr_stop_t2timer(struct sock *sk) 90void nr_stop_t2timer(struct sock *sk)
91{ 91{
92 del_timer(&nr_sk(sk)->t2timer); 92 sk_stop_timer(sk, &nr_sk(sk)->t2timer);
93} 93}
94 94
95void nr_stop_t4timer(struct sock *sk) 95void nr_stop_t4timer(struct sock *sk)
96{ 96{
97 del_timer(&nr_sk(sk)->t4timer); 97 sk_stop_timer(sk, &nr_sk(sk)->t4timer);
98} 98}
99 99
100void nr_stop_idletimer(struct sock *sk) 100void nr_stop_idletimer(struct sock *sk)
101{ 101{
102 del_timer(&nr_sk(sk)->idletimer); 102 sk_stop_timer(sk, &nr_sk(sk)->idletimer);
103} 103}
104 104
105void nr_stop_heartbeat(struct sock *sk) 105void nr_stop_heartbeat(struct sock *sk)
106{ 106{
107 del_timer(&sk->sk_timer); 107 sk_stop_timer(sk, &sk->sk_timer);
108} 108}
109 109
110int nr_t1timer_running(struct sock *sk) 110int nr_t1timer_running(struct sock *sk)
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 57e07768c9d1..f54cf17ef7a8 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -276,10 +276,12 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
276 276
277 nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); 277 nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
278 if (flags & IP6_FH_F_FRAG) { 278 if (flags & IP6_FH_F_FRAG) {
279 if (frag_off) 279 if (frag_off) {
280 key->ip.frag = OVS_FRAG_TYPE_LATER; 280 key->ip.frag = OVS_FRAG_TYPE_LATER;
281 else 281 key->ip.proto = nexthdr;
282 key->ip.frag = OVS_FRAG_TYPE_FIRST; 282 return 0;
283 }
284 key->ip.frag = OVS_FRAG_TYPE_FIRST;
283 } else { 285 } else {
284 key->ip.frag = OVS_FRAG_TYPE_NONE; 286 key->ip.frag = OVS_FRAG_TYPE_NONE;
285 } 287 }
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 435a4bdf8f89..691da853bef5 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
500 return -EINVAL; 500 return -EINVAL;
501 } 501 }
502 502
503 if (!nz || !is_all_zero(nla_data(nla), expected_len)) { 503 if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
504 attrs |= 1 << type; 504 attrs |= 1 << type;
505 a[type] = nla; 505 a[type] = nla;
506 } 506 }
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index eedacdebcd4c..1cd1d83a4be0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2628,7 +2628,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2628 addr = saddr->sll_halen ? saddr->sll_addr : NULL; 2628 addr = saddr->sll_halen ? saddr->sll_addr : NULL;
2629 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2629 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2630 if (addr && dev && saddr->sll_halen < dev->addr_len) 2630 if (addr && dev && saddr->sll_halen < dev->addr_len)
2631 goto out; 2631 goto out_put;
2632 } 2632 }
2633 2633
2634 err = -ENXIO; 2634 err = -ENXIO;
@@ -2828,7 +2828,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2828 addr = saddr->sll_halen ? saddr->sll_addr : NULL; 2828 addr = saddr->sll_halen ? saddr->sll_addr : NULL;
2829 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2829 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2830 if (addr && dev && saddr->sll_halen < dev->addr_len) 2830 if (addr && dev && saddr->sll_halen < dev->addr_len)
2831 goto out; 2831 goto out_unlock;
2832 } 2832 }
2833 2833
2834 err = -ENXIO; 2834 err = -ENXIO;
@@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2887 goto out_free; 2887 goto out_free;
2888 } else if (reserve) { 2888 } else if (reserve) {
2889 skb_reserve(skb, -reserve); 2889 skb_reserve(skb, -reserve);
2890 if (len < reserve) 2890 if (len < reserve + sizeof(struct ipv6hdr) &&
2891 dev->min_header_len != dev->hard_header_len)
2891 skb_reset_network_header(skb); 2892 skb_reset_network_header(skb);
2892 } 2893 }
2893 2894
@@ -4291,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4291 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4292 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4292 if (unlikely(rb->frames_per_block == 0)) 4293 if (unlikely(rb->frames_per_block == 0))
4293 goto out; 4294 goto out;
4294 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) 4295 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4295 goto out; 4296 goto out;
4296 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4297 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4297 req->tp_frame_nr)) 4298 req->tp_frame_nr))
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9fc76b19cd3c..db3473540303 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
132 ph->utid = 0; 132 ph->utid = 0;
133 ph->message_id = id; 133 ph->message_id = id;
134 ph->pipe_handle = pn->pipe_handle; 134 ph->pipe_handle = pn->pipe_handle;
135 ph->data[0] = code; 135 ph->error_code = code;
136 return pn_skb_send(sk, skb, NULL); 136 return pn_skb_send(sk, skb, NULL);
137} 137}
138 138
@@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
153 ph->utid = id; /* whatever */ 153 ph->utid = id; /* whatever */
154 ph->message_id = id; 154 ph->message_id = id;
155 ph->pipe_handle = pn->pipe_handle; 155 ph->pipe_handle = pn->pipe_handle;
156 ph->data[0] = code; 156 ph->error_code = code;
157 return pn_skb_send(sk, skb, NULL); 157 return pn_skb_send(sk, skb, NULL);
158} 158}
159 159
@@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
208 struct pnpipehdr *ph; 208 struct pnpipehdr *ph;
209 struct sockaddr_pn dst; 209 struct sockaddr_pn dst;
210 u8 data[4] = { 210 u8 data[4] = {
211 oph->data[0], /* PEP type */ 211 oph->pep_type, /* PEP type */
212 code, /* error code, at an unusual offset */ 212 code, /* error code, at an unusual offset */
213 PAD, PAD, 213 PAD, PAD,
214 }; 214 };
@@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
221 ph->utid = oph->utid; 221 ph->utid = oph->utid;
222 ph->message_id = PNS_PEP_CTRL_RESP; 222 ph->message_id = PNS_PEP_CTRL_RESP;
223 ph->pipe_handle = oph->pipe_handle; 223 ph->pipe_handle = oph->pipe_handle;
224 ph->data[0] = oph->data[1]; /* CTRL id */ 224 ph->data0 = oph->data[0]; /* CTRL id */
225 225
226 pn_skb_get_src_sockaddr(oskb, &dst); 226 pn_skb_get_src_sockaddr(oskb, &dst);
227 return pn_skb_send(sk, skb, &dst); 227 return pn_skb_send(sk, skb, &dst);
@@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
272 return -EINVAL; 272 return -EINVAL;
273 273
274 hdr = pnp_hdr(skb); 274 hdr = pnp_hdr(skb);
275 if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 275 if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
276 net_dbg_ratelimited("Phonet unknown PEP type: %u\n", 276 net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
277 (unsigned int)hdr->data[0]); 277 (unsigned int)hdr->pep_type);
278 return -EOPNOTSUPP; 278 return -EOPNOTSUPP;
279 } 279 }
280 280
281 switch (hdr->data[1]) { 281 switch (hdr->data[0]) {
282 case PN_PEP_IND_FLOW_CONTROL: 282 case PN_PEP_IND_FLOW_CONTROL:
283 switch (pn->tx_fc) { 283 switch (pn->tx_fc) {
284 case PN_LEGACY_FLOW_CONTROL: 284 case PN_LEGACY_FLOW_CONTROL:
285 switch (hdr->data[4]) { 285 switch (hdr->data[3]) {
286 case PEP_IND_BUSY: 286 case PEP_IND_BUSY:
287 atomic_set(&pn->tx_credits, 0); 287 atomic_set(&pn->tx_credits, 0);
288 break; 288 break;
@@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
292 } 292 }
293 break; 293 break;
294 case PN_ONE_CREDIT_FLOW_CONTROL: 294 case PN_ONE_CREDIT_FLOW_CONTROL:
295 if (hdr->data[4] == PEP_IND_READY) 295 if (hdr->data[3] == PEP_IND_READY)
296 atomic_set(&pn->tx_credits, wake = 1); 296 atomic_set(&pn->tx_credits, wake = 1);
297 break; 297 break;
298 } 298 }
@@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
301 case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: 301 case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
302 if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) 302 if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
303 break; 303 break;
304 atomic_add(wake = hdr->data[4], &pn->tx_credits); 304 atomic_add(wake = hdr->data[3], &pn->tx_credits);
305 break; 305 break;
306 306
307 default: 307 default:
308 net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", 308 net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
309 (unsigned int)hdr->data[1]); 309 (unsigned int)hdr->data[0]);
310 return -EOPNOTSUPP; 310 return -EOPNOTSUPP;
311 } 311 }
312 if (wake) 312 if (wake)
@@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
318{ 318{
319 struct pep_sock *pn = pep_sk(sk); 319 struct pep_sock *pn = pep_sk(sk);
320 struct pnpipehdr *hdr = pnp_hdr(skb); 320 struct pnpipehdr *hdr = pnp_hdr(skb);
321 u8 n_sb = hdr->data[0]; 321 u8 n_sb = hdr->data0;
322 322
323 pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; 323 pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
324 __skb_pull(skb, sizeof(*hdr)); 324 __skb_pull(skb, sizeof(*hdr));
@@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
506 return -ECONNREFUSED; 506 return -ECONNREFUSED;
507 507
508 /* Parse sub-blocks */ 508 /* Parse sub-blocks */
509 n_sb = hdr->data[4]; 509 n_sb = hdr->data[3];
510 while (n_sb > 0) { 510 while (n_sb > 0) {
511 u8 type, buf[6], len = sizeof(buf); 511 u8 type, buf[6], len = sizeof(buf);
512 const u8 *data = pep_get_sb(skb, &type, &len, buf); 512 const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk)
739 ph->utid = 0; 739 ph->utid = 0;
740 ph->message_id = PNS_PIPE_REMOVE_REQ; 740 ph->message_id = PNS_PIPE_REMOVE_REQ;
741 ph->pipe_handle = pn->pipe_handle; 741 ph->pipe_handle = pn->pipe_handle;
742 ph->data[0] = PAD; 742 ph->data0 = PAD;
743 return pn_skb_send(sk, skb, NULL); 743 return pn_skb_send(sk, skb, NULL);
744} 744}
745 745
@@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
817 peer_type = hdr->other_pep_type << 8; 817 peer_type = hdr->other_pep_type << 8;
818 818
819 /* Parse sub-blocks (options) */ 819 /* Parse sub-blocks (options) */
820 n_sb = hdr->data[4]; 820 n_sb = hdr->data[3];
821 while (n_sb > 0) { 821 while (n_sb > 0) {
822 u8 type, buf[1], len = sizeof(buf); 822 u8 type, buf[1], len = sizeof(buf);
823 const u8 *data = pep_get_sb(skb, &type, &len, buf); 823 const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
1109 ph->utid = 0; 1109 ph->utid = 0;
1110 if (pn->aligned) { 1110 if (pn->aligned) {
1111 ph->message_id = PNS_PIPE_ALIGNED_DATA; 1111 ph->message_id = PNS_PIPE_ALIGNED_DATA;
1112 ph->data[0] = 0; /* padding */ 1112 ph->data0 = 0; /* padding */
1113 } else 1113 } else
1114 ph->message_id = PNS_PIPE_DATA; 1114 ph->message_id = PNS_PIPE_DATA;
1115 ph->pipe_handle = pn->pipe_handle; 1115 ph->pipe_handle = pn->pipe_handle;
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 762d2c6788a3..17c9d9f0c848 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
78 __rds_create_bind_key(key, addr, port, scope_id); 78 __rds_create_bind_key(key, addr, port, scope_id);
79 rcu_read_lock(); 79 rcu_read_lock();
80 rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); 80 rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
81 if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) 81 if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
82 rds_sock_addref(rs); 82 !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
83 else
84 rs = NULL; 83 rs = NULL;
84
85 rcu_read_unlock(); 85 rcu_read_unlock();
86 86
87 rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, 87 rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 2dcb555e6350..4e0c36acf866 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -522,7 +522,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
522 if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) 522 if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
523 i = 1; 523 i = 1;
524 else 524 else
525 i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); 525 i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
526 526
527 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); 527 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
528 if (work_alloc == 0) { 528 if (work_alloc == 0) {
@@ -879,7 +879,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
879 * Instead of knowing how to return a partial rdma read/write we insist that there 879 * Instead of knowing how to return a partial rdma read/write we insist that there
880 * be enough work requests to send the entire message. 880 * be enough work requests to send the entire message.
881 */ 881 */
882 i = ceil(op->op_count, max_sge); 882 i = DIV_ROUND_UP(op->op_count, max_sge);
883 883
884 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); 884 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
885 if (work_alloc != i) { 885 if (work_alloc != i) {
diff --git a/net/rds/message.c b/net/rds/message.c
index f139420ba1f6..50f13f1d4ae0 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -341,7 +341,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
341{ 341{
342 struct rds_message *rm; 342 struct rds_message *rm;
343 unsigned int i; 343 unsigned int i;
344 int num_sgs = ceil(total_len, PAGE_SIZE); 344 int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
345 int extra_bytes = num_sgs * sizeof(struct scatterlist); 345 int extra_bytes = num_sgs * sizeof(struct scatterlist);
346 int ret; 346 int ret;
347 347
@@ -351,7 +351,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
351 351
352 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); 352 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
353 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); 353 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
354 rm->data.op_nents = ceil(total_len, PAGE_SIZE); 354 rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
355 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); 355 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
356 if (!rm->data.op_sg) { 356 if (!rm->data.op_sg) {
357 rds_message_put(rm); 357 rds_message_put(rm);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 02ec4a3b2799..4ffe100ff5e6 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -48,10 +48,6 @@ void rdsdebug(char *fmt, ...)
48} 48}
49#endif 49#endif
50 50
51/* XXX is there one of these somewhere? */
52#define ceil(x, y) \
53 ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
54
55#define RDS_FRAG_SHIFT 12 51#define RDS_FRAG_SHIFT 12
56#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT)) 52#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
57 53
diff --git a/net/rds/send.c b/net/rds/send.c
index 3d822bad7de9..fd8b687d5c05 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1107,7 +1107,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1107 size_t total_payload_len = payload_len, rdma_payload_len = 0; 1107 size_t total_payload_len = payload_len, rdma_payload_len = 0;
1108 bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && 1108 bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
1109 sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); 1109 sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
1110 int num_sgs = ceil(payload_len, PAGE_SIZE); 1110 int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
1111 int namelen; 1111 int namelen;
1112 struct rds_iov_vector_arr vct; 1112 struct rds_iov_vector_arr vct;
1113 int ind; 1113 int ind;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 77e9f85a2c92..f2ff21d7df08 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev)
850 850
851/* 851/*
852 * Route a frame to an appropriate AX.25 connection. 852 * Route a frame to an appropriate AX.25 connection.
853 * A NULL ax25_cb indicates an internally generated frame.
853 */ 854 */
854int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) 855int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
855{ 856{
@@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
867 868
868 if (skb->len < ROSE_MIN_LEN) 869 if (skb->len < ROSE_MIN_LEN)
869 return res; 870 return res;
871
872 if (!ax25)
873 return rose_loopback_queue(skb, NULL);
874
870 frametype = skb->data[2]; 875 frametype = skb->data[2];
871 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 876 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
872 if (frametype == ROSE_CALL_REQUEST && 877 if (frametype == ROSE_CALL_REQUEST &&
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index a2522f9d71e2..96f2952bbdfd 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -419,76 +419,6 @@ u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
419EXPORT_SYMBOL(rxrpc_kernel_get_epoch); 419EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
420 420
421/** 421/**
422 * rxrpc_kernel_check_call - Check a call's state
423 * @sock: The socket the call is on
424 * @call: The call to check
425 * @_compl: Where to store the completion state
426 * @_abort_code: Where to store any abort code
427 *
428 * Allow a kernel service to query the state of a call and find out the manner
429 * of its termination if it has completed. Returns -EINPROGRESS if the call is
430 * still going, 0 if the call finished successfully, -ECONNABORTED if the call
431 * was aborted and an appropriate error if the call failed in some other way.
432 */
433int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
434 enum rxrpc_call_completion *_compl, u32 *_abort_code)
435{
436 if (call->state != RXRPC_CALL_COMPLETE)
437 return -EINPROGRESS;
438 smp_rmb();
439 *_compl = call->completion;
440 *_abort_code = call->abort_code;
441 return call->error;
442}
443EXPORT_SYMBOL(rxrpc_kernel_check_call);
444
445/**
446 * rxrpc_kernel_retry_call - Allow a kernel service to retry a call
447 * @sock: The socket the call is on
448 * @call: The call to retry
449 * @srx: The address of the peer to contact
450 * @key: The security context to use (defaults to socket setting)
451 *
452 * Allow a kernel service to try resending a client call that failed due to a
453 * network error to a new address. The Tx queue is maintained intact, thereby
454 * relieving the need to re-encrypt any request data that has already been
455 * buffered.
456 */
457int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
458 struct sockaddr_rxrpc *srx, struct key *key)
459{
460 struct rxrpc_conn_parameters cp;
461 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
462 int ret;
463
464 _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
465
466 if (!key)
467 key = rx->key;
468 if (key && !key->payload.data[0])
469 key = NULL; /* a no-security key */
470
471 memset(&cp, 0, sizeof(cp));
472 cp.local = rx->local;
473 cp.key = key;
474 cp.security_level = 0;
475 cp.exclusive = false;
476 cp.service_id = srx->srx_service;
477
478 mutex_lock(&call->user_mutex);
479
480 ret = rxrpc_prepare_call_for_retry(rx, call);
481 if (ret == 0)
482 ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);
483
484 mutex_unlock(&call->user_mutex);
485 rxrpc_put_peer(cp.peer);
486 _leave(" = %d", ret);
487 return ret;
488}
489EXPORT_SYMBOL(rxrpc_kernel_retry_call);
490
491/**
492 * rxrpc_kernel_new_call_notification - Get notifications of new calls 422 * rxrpc_kernel_new_call_notification - Get notifications of new calls
493 * @sock: The socket to intercept received messages on 423 * @sock: The socket to intercept received messages on
494 * @notify_new_call: Function to be called when new calls appear 424 * @notify_new_call: Function to be called when new calls appear
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index bc628acf4f4f..4b1a534d290a 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -476,7 +476,6 @@ enum rxrpc_call_flag {
476 RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ 476 RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
477 RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ 477 RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
478 RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ 478 RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
479 RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */
480 RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ 479 RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
481 RXRPC_CALL_PINGING, /* Ping in process */ 480 RXRPC_CALL_PINGING, /* Ping in process */
482 RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ 481 RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
@@ -518,6 +517,18 @@ enum rxrpc_call_state {
518}; 517};
519 518
520/* 519/*
520 * Call completion condition (state == RXRPC_CALL_COMPLETE).
521 */
522enum rxrpc_call_completion {
523 RXRPC_CALL_SUCCEEDED, /* - Normal termination */
524 RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
525 RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
526 RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
527 RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
528 NR__RXRPC_CALL_COMPLETIONS
529};
530
531/*
521 * Call Tx congestion management modes. 532 * Call Tx congestion management modes.
522 */ 533 */
523enum rxrpc_congest_mode { 534enum rxrpc_congest_mode {
@@ -761,15 +772,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
761 struct sockaddr_rxrpc *, 772 struct sockaddr_rxrpc *,
762 struct rxrpc_call_params *, gfp_t, 773 struct rxrpc_call_params *, gfp_t,
763 unsigned int); 774 unsigned int);
764int rxrpc_retry_client_call(struct rxrpc_sock *,
765 struct rxrpc_call *,
766 struct rxrpc_conn_parameters *,
767 struct sockaddr_rxrpc *,
768 gfp_t);
769void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, 775void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
770 struct sk_buff *); 776 struct sk_buff *);
771void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); 777void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
772int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
773void rxrpc_release_calls_on_socket(struct rxrpc_sock *); 778void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
774bool __rxrpc_queue_call(struct rxrpc_call *); 779bool __rxrpc_queue_call(struct rxrpc_call *);
775bool rxrpc_queue_call(struct rxrpc_call *); 780bool rxrpc_queue_call(struct rxrpc_call *);
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8f1a8f85b1f9..8aa2937b069f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -325,48 +325,6 @@ error:
325} 325}
326 326
327/* 327/*
328 * Retry a call to a new address. It is expected that the Tx queue of the call
329 * will contain data previously packaged for an old call.
330 */
331int rxrpc_retry_client_call(struct rxrpc_sock *rx,
332 struct rxrpc_call *call,
333 struct rxrpc_conn_parameters *cp,
334 struct sockaddr_rxrpc *srx,
335 gfp_t gfp)
336{
337 const void *here = __builtin_return_address(0);
338 int ret;
339
340 /* Set up or get a connection record and set the protocol parameters,
341 * including channel number and call ID.
342 */
343 ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
344 if (ret < 0)
345 goto error;
346
347 trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
348 here, NULL);
349
350 rxrpc_start_call_timer(call);
351
352 _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
353
354 if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
355 rxrpc_queue_call(call);
356
357 _leave(" = 0");
358 return 0;
359
360error:
361 rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
362 RX_CALL_DEAD, ret);
363 trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
364 here, ERR_PTR(ret));
365 _leave(" = %d", ret);
366 return ret;
367}
368
369/*
370 * Set up an incoming call. call->conn points to the connection. 328 * Set up an incoming call. call->conn points to the connection.
371 * This is called in BH context and isn't allowed to fail. 329 * This is called in BH context and isn't allowed to fail.
372 */ 330 */
@@ -534,61 +492,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
534} 492}
535 493
536/* 494/*
537 * Prepare a kernel service call for retry.
538 */
539int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
540{
541 const void *here = __builtin_return_address(0);
542 int i;
543 u8 last = 0;
544
545 _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
546
547 trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
548 here, (const void *)call->flags);
549
550 ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
551 ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
552 ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
553 ASSERT(list_empty(&call->recvmsg_link));
554
555 del_timer_sync(&call->timer);
556
557 _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);
558
559 if (call->conn)
560 rxrpc_disconnect_call(call);
561
562 if (rxrpc_is_service_call(call) ||
563 !call->tx_phase ||
564 call->tx_hard_ack != 0 ||
565 call->rx_hard_ack != 0 ||
566 call->rx_top != 0)
567 return -EINVAL;
568
569 call->state = RXRPC_CALL_UNINITIALISED;
570 call->completion = RXRPC_CALL_SUCCEEDED;
571 call->call_id = 0;
572 call->cid = 0;
573 call->cong_cwnd = 0;
574 call->cong_extra = 0;
575 call->cong_ssthresh = 0;
576 call->cong_mode = 0;
577 call->cong_dup_acks = 0;
578 call->cong_cumul_acks = 0;
579 call->acks_lowest_nak = 0;
580
581 for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
582 last |= call->rxtx_annotations[i];
583 call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
584 call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
585 }
586
587 _leave(" = 0");
588 return 0;
589}
590
591/*
592 * release all the calls associated with a socket 495 * release all the calls associated with a socket
593 */ 496 */
594void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) 497void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 521189f4b666..b2adfa825363 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -562,10 +562,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
562 clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); 562 clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
563 563
564 write_lock_bh(&call->state_lock); 564 write_lock_bh(&call->state_lock);
565 if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) 565 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
566 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
567 else
568 call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
569 write_unlock_bh(&call->state_lock); 566 write_unlock_bh(&call->state_lock);
570 567
571 rxrpc_see_call(call); 568 rxrpc_see_call(call);
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index eaf19ebaa964..3f7bb11f3290 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -596,6 +596,7 @@ error_requeue_call:
596 } 596 }
597error_no_call: 597error_no_call:
598 release_sock(&rx->sk); 598 release_sock(&rx->sk);
599error_trace:
599 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); 600 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
600 return ret; 601 return ret;
601 602
@@ -604,7 +605,7 @@ wait_interrupted:
604wait_error: 605wait_error:
605 finish_wait(sk_sleep(&rx->sk), &wait); 606 finish_wait(sk_sleep(&rx->sk), &wait);
606 call = NULL; 607 call = NULL;
607 goto error_no_call; 608 goto error_trace;
608} 609}
609 610
610/** 611/**
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index be01f9c5d963..46c9312085b1 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -169,10 +169,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
169 169
170 ASSERTCMP(seq, ==, call->tx_top + 1); 170 ASSERTCMP(seq, ==, call->tx_top + 1);
171 171
172 if (last) { 172 if (last)
173 annotation |= RXRPC_TX_ANNO_LAST; 173 annotation |= RXRPC_TX_ANNO_LAST;
174 set_bit(RXRPC_CALL_TX_LASTQ, &call->flags);
175 }
176 174
177 /* We have to set the timestamp before queueing as the retransmit 175 /* We have to set the timestamp before queueing as the retransmit
178 * algorithm can see the packet as soon as we queue it. 176 * algorithm can see the packet as soon as we queue it.
@@ -386,6 +384,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
386 call->tx_total_len -= copy; 384 call->tx_total_len -= copy;
387 } 385 }
388 386
387 /* check for the far side aborting the call or a network error
388 * occurring */
389 if (call->state == RXRPC_CALL_COMPLETE)
390 goto call_terminated;
391
389 /* add the packet to the send queue if it's now full */ 392 /* add the packet to the send queue if it's now full */
390 if (sp->remain <= 0 || 393 if (sp->remain <= 0 ||
391 (msg_data_left(msg) == 0 && !more)) { 394 (msg_data_left(msg) == 0 && !more)) {
@@ -425,16 +428,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
425 notify_end_tx); 428 notify_end_tx);
426 skb = NULL; 429 skb = NULL;
427 } 430 }
428
429 /* Check for the far side aborting the call or a network error
430 * occurring. If this happens, save any packet that was under
431 * construction so that in the case of a network error, the
432 * call can be retried or redirected.
433 */
434 if (call->state == RXRPC_CALL_COMPLETE) {
435 ret = call->error;
436 goto out;
437 }
438 } while (msg_data_left(msg) > 0); 431 } while (msg_data_left(msg) > 0);
439 432
440success: 433success:
@@ -444,6 +437,11 @@ out:
444 _leave(" = %d", ret); 437 _leave(" = %d", ret);
445 return ret; 438 return ret;
446 439
440call_terminated:
441 rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
442 _leave(" = %d", call->error);
443 return call->error;
444
447maybe_error: 445maybe_error:
448 if (copied) 446 if (copied)
449 goto success; 447 goto success;
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index c3b90fadaff6..8b43fe0130f7 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
197 [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, 197 [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
198}; 198};
199 199
200static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
201{
202 if (!p)
203 return;
204 if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
205 dst_release(&p->tcft_enc_metadata->dst);
206 kfree_rcu(p, rcu);
207}
208
200static int tunnel_key_init(struct net *net, struct nlattr *nla, 209static int tunnel_key_init(struct net *net, struct nlattr *nla,
201 struct nlattr *est, struct tc_action **a, 210 struct nlattr *est, struct tc_action **a,
202 int ovr, int bind, bool rtnl_held, 211 int ovr, int bind, bool rtnl_held,
@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
360 rcu_swap_protected(t->params, params_new, 369 rcu_swap_protected(t->params, params_new,
361 lockdep_is_held(&t->tcf_lock)); 370 lockdep_is_held(&t->tcf_lock));
362 spin_unlock_bh(&t->tcf_lock); 371 spin_unlock_bh(&t->tcf_lock);
363 if (params_new) 372 tunnel_key_release_params(params_new);
364 kfree_rcu(params_new, rcu);
365 373
366 if (ret == ACT_P_CREATED) 374 if (ret == ACT_P_CREATED)
367 tcf_idr_insert(tn, *a); 375 tcf_idr_insert(tn, *a);
@@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a)
385 struct tcf_tunnel_key_params *params; 393 struct tcf_tunnel_key_params *params;
386 394
387 params = rcu_dereference_protected(t->params, 1); 395 params = rcu_dereference_protected(t->params, 1);
388 if (params) { 396 tunnel_key_release_params(params);
389 if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
390 dst_release(&params->tcft_enc_metadata->dst);
391
392 kfree_rcu(params, rcu);
393 }
394} 397}
395 398
396static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, 399static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8ce2a0507970..e2b5cb2eb34e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1277,7 +1277,6 @@ EXPORT_SYMBOL(tcf_block_cb_unregister);
1277int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, 1277int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1278 struct tcf_result *res, bool compat_mode) 1278 struct tcf_result *res, bool compat_mode)
1279{ 1279{
1280 __be16 protocol = tc_skb_protocol(skb);
1281#ifdef CONFIG_NET_CLS_ACT 1280#ifdef CONFIG_NET_CLS_ACT
1282 const int max_reclassify_loop = 4; 1281 const int max_reclassify_loop = 4;
1283 const struct tcf_proto *orig_tp = tp; 1282 const struct tcf_proto *orig_tp = tp;
@@ -1287,6 +1286,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1287reclassify: 1286reclassify:
1288#endif 1287#endif
1289 for (; tp; tp = rcu_dereference_bh(tp->next)) { 1288 for (; tp; tp = rcu_dereference_bh(tp->next)) {
1289 __be16 protocol = tc_skb_protocol(skb);
1290 int err; 1290 int err;
1291 1291
1292 if (tp->protocol != protocol && 1292 if (tp->protocol != protocol &&
@@ -1319,7 +1319,6 @@ reset:
1319 } 1319 }
1320 1320
1321 tp = first_tp; 1321 tp = first_tp;
1322 protocol = tc_skb_protocol(skb);
1323 goto reclassify; 1322 goto reclassify;
1324#endif 1323#endif
1325} 1324}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index dad04e710493..12ca9d13db83 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1290,17 +1290,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1290 struct cls_fl_head *head = rtnl_dereference(tp->root); 1290 struct cls_fl_head *head = rtnl_dereference(tp->root);
1291 struct cls_fl_filter *fold = *arg; 1291 struct cls_fl_filter *fold = *arg;
1292 struct cls_fl_filter *fnew; 1292 struct cls_fl_filter *fnew;
1293 struct fl_flow_mask *mask;
1293 struct nlattr **tb; 1294 struct nlattr **tb;
1294 struct fl_flow_mask mask = {};
1295 int err; 1295 int err;
1296 1296
1297 if (!tca[TCA_OPTIONS]) 1297 if (!tca[TCA_OPTIONS])
1298 return -EINVAL; 1298 return -EINVAL;
1299 1299
1300 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 1300 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1301 if (!tb) 1301 if (!mask)
1302 return -ENOBUFS; 1302 return -ENOBUFS;
1303 1303
1304 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1305 if (!tb) {
1306 err = -ENOBUFS;
1307 goto errout_mask_alloc;
1308 }
1309
1304 err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], 1310 err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1305 fl_policy, NULL); 1311 fl_policy, NULL);
1306 if (err < 0) 1312 if (err < 0)
@@ -1343,12 +1349,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1343 } 1349 }
1344 } 1350 }
1345 1351
1346 err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr, 1352 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1347 tp->chain->tmplt_priv, extack); 1353 tp->chain->tmplt_priv, extack);
1348 if (err) 1354 if (err)
1349 goto errout_idr; 1355 goto errout_idr;
1350 1356
1351 err = fl_check_assign_mask(head, fnew, fold, &mask); 1357 err = fl_check_assign_mask(head, fnew, fold, mask);
1352 if (err) 1358 if (err)
1353 goto errout_idr; 1359 goto errout_idr;
1354 1360
@@ -1365,7 +1371,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1365 if (!tc_skip_hw(fnew->flags)) { 1371 if (!tc_skip_hw(fnew->flags)) {
1366 err = fl_hw_replace_filter(tp, fnew, extack); 1372 err = fl_hw_replace_filter(tp, fnew, extack);
1367 if (err) 1373 if (err)
1368 goto errout_mask; 1374 goto errout_mask_ht;
1369 } 1375 }
1370 1376
1371 if (!tc_in_hw(fnew->flags)) 1377 if (!tc_in_hw(fnew->flags))
@@ -1392,8 +1398,13 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1392 } 1398 }
1393 1399
1394 kfree(tb); 1400 kfree(tb);
1401 kfree(mask);
1395 return 0; 1402 return 0;
1396 1403
1404errout_mask_ht:
1405 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1406 fnew->mask->filter_ht_params);
1407
1397errout_mask: 1408errout_mask:
1398 fl_mask_put(head, fnew->mask, false); 1409 fl_mask_put(head, fnew->mask, false);
1399 1410
@@ -1405,6 +1416,8 @@ errout:
1405 kfree(fnew); 1416 kfree(fnew);
1406errout_tb: 1417errout_tb:
1407 kfree(tb); 1418 kfree(tb);
1419errout_mask_alloc:
1420 kfree(mask);
1408 return err; 1421 return err;
1409} 1422}
1410 1423
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 9ccc93f257db..38bb882bb958 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -48,7 +48,7 @@ struct tcindex_data {
48 u32 hash; /* hash table size; 0 if undefined */ 48 u32 hash; /* hash table size; 0 if undefined */
49 u32 alloc_hash; /* allocated size */ 49 u32 alloc_hash; /* allocated size */
50 u32 fall_through; /* 0: only classify if explicit match */ 50 u32 fall_through; /* 0: only classify if explicit match */
51 struct rcu_head rcu; 51 struct rcu_work rwork;
52}; 52};
53 53
54static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) 54static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
@@ -221,17 +221,11 @@ found:
221 return 0; 221 return 0;
222} 222}
223 223
224static int tcindex_destroy_element(struct tcf_proto *tp, 224static void tcindex_destroy_work(struct work_struct *work)
225 void *arg, struct tcf_walker *walker)
226{
227 bool last;
228
229 return tcindex_delete(tp, arg, &last, NULL);
230}
231
232static void __tcindex_destroy(struct rcu_head *head)
233{ 225{
234 struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); 226 struct tcindex_data *p = container_of(to_rcu_work(work),
227 struct tcindex_data,
228 rwork);
235 229
236 kfree(p->perfect); 230 kfree(p->perfect);
237 kfree(p->h); 231 kfree(p->h);
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
258 return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); 252 return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
259} 253}
260 254
261static void __tcindex_partial_destroy(struct rcu_head *head) 255static void tcindex_partial_destroy_work(struct work_struct *work)
262{ 256{
263 struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); 257 struct tcindex_data *p = container_of(to_rcu_work(work),
258 struct tcindex_data,
259 rwork);
264 260
265 kfree(p->perfect); 261 kfree(p->perfect);
266 kfree(p); 262 kfree(p);
@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
275 kfree(cp->perfect); 271 kfree(cp->perfect);
276} 272}
277 273
278static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) 274static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
279{ 275{
280 int i, err = 0; 276 int i, err = 0;
281 277
@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
289 TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); 285 TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
290 if (err < 0) 286 if (err < 0)
291 goto errout; 287 goto errout;
288#ifdef CONFIG_NET_CLS_ACT
289 cp->perfect[i].exts.net = net;
290#endif
292 } 291 }
293 292
294 return 0; 293 return 0;
@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
305 struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) 304 struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
306{ 305{
307 struct tcindex_filter_result new_filter_result, *old_r = r; 306 struct tcindex_filter_result new_filter_result, *old_r = r;
308 struct tcindex_filter_result cr;
309 struct tcindex_data *cp = NULL, *oldp; 307 struct tcindex_data *cp = NULL, *oldp;
310 struct tcindex_filter *f = NULL; /* make gcc behave */ 308 struct tcindex_filter *f = NULL; /* make gcc behave */
309 struct tcf_result cr = {};
311 int err, balloc = 0; 310 int err, balloc = 0;
312 struct tcf_exts e; 311 struct tcf_exts e;
313 312
@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
337 if (p->perfect) { 336 if (p->perfect) {
338 int i; 337 int i;
339 338
340 if (tcindex_alloc_perfect_hash(cp) < 0) 339 if (tcindex_alloc_perfect_hash(net, cp) < 0)
341 goto errout; 340 goto errout;
342 for (i = 0; i < cp->hash; i++) 341 for (i = 0; i < cp->hash; i++)
343 cp->perfect[i].res = p->perfect[i].res; 342 cp->perfect[i].res = p->perfect[i].res;
@@ -348,11 +347,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
348 err = tcindex_filter_result_init(&new_filter_result); 347 err = tcindex_filter_result_init(&new_filter_result);
349 if (err < 0) 348 if (err < 0)
350 goto errout1; 349 goto errout1;
351 err = tcindex_filter_result_init(&cr);
352 if (err < 0)
353 goto errout1;
354 if (old_r) 350 if (old_r)
355 cr.res = r->res; 351 cr = r->res;
356 352
357 if (tb[TCA_TCINDEX_HASH]) 353 if (tb[TCA_TCINDEX_HASH])
358 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); 354 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
406 err = -ENOMEM; 402 err = -ENOMEM;
407 if (!cp->perfect && !cp->h) { 403 if (!cp->perfect && !cp->h) {
408 if (valid_perfect_hash(cp)) { 404 if (valid_perfect_hash(cp)) {
409 if (tcindex_alloc_perfect_hash(cp) < 0) 405 if (tcindex_alloc_perfect_hash(net, cp) < 0)
410 goto errout_alloc; 406 goto errout_alloc;
411 balloc = 1; 407 balloc = 1;
412 } else { 408 } else {
@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
443 } 439 }
444 440
445 if (tb[TCA_TCINDEX_CLASSID]) { 441 if (tb[TCA_TCINDEX_CLASSID]) {
446 cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); 442 cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
447 tcf_bind_filter(tp, &cr.res, base); 443 tcf_bind_filter(tp, &cr, base);
448 } 444 }
449 445
450 if (old_r && old_r != r) { 446 if (old_r && old_r != r) {
@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
456 } 452 }
457 453
458 oldp = p; 454 oldp = p;
459 r->res = cr.res; 455 r->res = cr;
460 tcf_exts_change(&r->exts, &e); 456 tcf_exts_change(&r->exts, &e);
461 457
462 rcu_assign_pointer(tp->root, cp); 458 rcu_assign_pointer(tp->root, cp);
@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
475 ; /* nothing */ 471 ; /* nothing */
476 472
477 rcu_assign_pointer(*fp, f); 473 rcu_assign_pointer(*fp, f);
474 } else {
475 tcf_exts_destroy(&new_filter_result.exts);
478 } 476 }
479 477
480 if (oldp) 478 if (oldp)
481 call_rcu(&oldp->rcu, __tcindex_partial_destroy); 479 tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
482 return 0; 480 return 0;
483 481
484errout_alloc: 482errout_alloc:
@@ -487,7 +485,6 @@ errout_alloc:
487 else if (balloc == 2) 485 else if (balloc == 2)
488 kfree(cp->h); 486 kfree(cp->h);
489errout1: 487errout1:
490 tcf_exts_destroy(&cr.exts);
491 tcf_exts_destroy(&new_filter_result.exts); 488 tcf_exts_destroy(&new_filter_result.exts);
492errout: 489errout:
493 kfree(cp); 490 kfree(cp);
@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
562 struct netlink_ext_ack *extack) 559 struct netlink_ext_ack *extack)
563{ 560{
564 struct tcindex_data *p = rtnl_dereference(tp->root); 561 struct tcindex_data *p = rtnl_dereference(tp->root);
565 struct tcf_walker walker; 562 int i;
566 563
567 pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); 564 pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
568 walker.count = 0;
569 walker.skip = 0;
570 walker.fn = tcindex_destroy_element;
571 tcindex_walk(tp, &walker);
572 565
573 call_rcu(&p->rcu, __tcindex_destroy); 566 if (p->perfect) {
567 for (i = 0; i < p->hash; i++) {
568 struct tcindex_filter_result *r = p->perfect + i;
569
570 tcf_unbind_filter(tp, &r->res);
571 if (tcf_exts_get_net(&r->exts))
572 tcf_queue_work(&r->rwork,
573 tcindex_destroy_rexts_work);
574 else
575 __tcindex_destroy_rexts(r);
576 }
577 }
578
579 for (i = 0; p->h && i < p->hash; i++) {
580 struct tcindex_filter *f, *next;
581 bool last;
582
583 for (f = rtnl_dereference(p->h[i]); f; f = next) {
584 next = rtnl_dereference(f->next);
585 tcindex_delete(tp, &f->result, &last, NULL);
586 }
587 }
588
589 tcf_queue_work(&p->rwork, tcindex_destroy_work);
574} 590}
575 591
576 592
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index b910cd5c56f7..73940293700d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1667,7 +1667,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1667 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { 1667 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
1668 struct sk_buff *segs, *nskb; 1668 struct sk_buff *segs, *nskb;
1669 netdev_features_t features = netif_skb_features(skb); 1669 netdev_features_t features = netif_skb_features(skb);
1670 unsigned int slen = 0; 1670 unsigned int slen = 0, numsegs = 0;
1671 1671
1672 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 1672 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
1673 if (IS_ERR_OR_NULL(segs)) 1673 if (IS_ERR_OR_NULL(segs))
@@ -1683,6 +1683,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1683 flow_queue_add(flow, segs); 1683 flow_queue_add(flow, segs);
1684 1684
1685 sch->q.qlen++; 1685 sch->q.qlen++;
1686 numsegs++;
1686 slen += segs->len; 1687 slen += segs->len;
1687 q->buffer_used += segs->truesize; 1688 q->buffer_used += segs->truesize;
1688 b->packets++; 1689 b->packets++;
@@ -1696,7 +1697,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1696 sch->qstats.backlog += slen; 1697 sch->qstats.backlog += slen;
1697 q->avg_window_bytes += slen; 1698 q->avg_window_bytes += slen;
1698 1699
1699 qdisc_tree_reduce_backlog(sch, 1, len); 1700 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
1700 consume_skb(skb); 1701 consume_skb(skb);
1701 } else { 1702 } else {
1702 /* not splitting */ 1703 /* not splitting */
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index e689e11b6d0f..c6a502933fe7 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
88 struct Qdisc *child, 88 struct Qdisc *child,
89 struct sk_buff **to_free) 89 struct sk_buff **to_free)
90{ 90{
91 unsigned int len = qdisc_pkt_len(skb);
91 int err; 92 int err;
92 93
93 err = child->ops->enqueue(skb, child, to_free); 94 err = child->ops->enqueue(skb, child, to_free);
94 if (err != NET_XMIT_SUCCESS) 95 if (err != NET_XMIT_SUCCESS)
95 return err; 96 return err;
96 97
97 qdisc_qstats_backlog_inc(sch, skb); 98 sch->qstats.backlog += len;
98 sch->q.qlen++; 99 sch->q.qlen++;
99 100
100 return NET_XMIT_SUCCESS; 101 return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index cdebaed0f8cf..09b800991065 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -350,9 +350,11 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
350static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, 350static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
351 struct sk_buff **to_free) 351 struct sk_buff **to_free)
352{ 352{
353 unsigned int len = qdisc_pkt_len(skb);
353 struct drr_sched *q = qdisc_priv(sch); 354 struct drr_sched *q = qdisc_priv(sch);
354 struct drr_class *cl; 355 struct drr_class *cl;
355 int err = 0; 356 int err = 0;
357 bool first;
356 358
357 cl = drr_classify(skb, sch, &err); 359 cl = drr_classify(skb, sch, &err);
358 if (cl == NULL) { 360 if (cl == NULL) {
@@ -362,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
362 return err; 364 return err;
363 } 365 }
364 366
367 first = !cl->qdisc->q.qlen;
365 err = qdisc_enqueue(skb, cl->qdisc, to_free); 368 err = qdisc_enqueue(skb, cl->qdisc, to_free);
366 if (unlikely(err != NET_XMIT_SUCCESS)) { 369 if (unlikely(err != NET_XMIT_SUCCESS)) {
367 if (net_xmit_drop_count(err)) { 370 if (net_xmit_drop_count(err)) {
@@ -371,12 +374,12 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
371 return err; 374 return err;
372 } 375 }
373 376
374 if (cl->qdisc->q.qlen == 1) { 377 if (first) {
375 list_add_tail(&cl->alist, &q->active); 378 list_add_tail(&cl->alist, &q->active);
376 cl->deficit = cl->quantum; 379 cl->deficit = cl->quantum;
377 } 380 }
378 381
379 qdisc_qstats_backlog_inc(sch, skb); 382 sch->qstats.backlog += len;
380 sch->q.qlen++; 383 sch->q.qlen++;
381 return err; 384 return err;
382} 385}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index f6f480784bc6..42471464ded3 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
199static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, 199static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
200 struct sk_buff **to_free) 200 struct sk_buff **to_free)
201{ 201{
202 unsigned int len = qdisc_pkt_len(skb);
202 struct dsmark_qdisc_data *p = qdisc_priv(sch); 203 struct dsmark_qdisc_data *p = qdisc_priv(sch);
203 int err; 204 int err;
204 205
@@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
271 return err; 272 return err;
272 } 273 }
273 274
274 qdisc_qstats_backlog_inc(sch, skb); 275 sch->qstats.backlog += len;
275 sch->q.qlen++; 276 sch->q.qlen++;
276 277
277 return NET_XMIT_SUCCESS; 278 return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 66ba2ce2320f..968a85fe4d4a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -500,7 +500,7 @@ static void dev_watchdog_down(struct net_device *dev)
500 * netif_carrier_on - set carrier 500 * netif_carrier_on - set carrier
501 * @dev: network device 501 * @dev: network device
502 * 502 *
503 * Device has detected that carrier. 503 * Device has detected acquisition of carrier.
504 */ 504 */
505void netif_carrier_on(struct net_device *dev) 505void netif_carrier_on(struct net_device *dev)
506{ 506{
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b18ec1f6de60..24cc220a3218 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1539,8 +1539,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1539static int 1539static int
1540hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) 1540hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1541{ 1541{
1542 unsigned int len = qdisc_pkt_len(skb);
1542 struct hfsc_class *cl; 1543 struct hfsc_class *cl;
1543 int uninitialized_var(err); 1544 int uninitialized_var(err);
1545 bool first;
1544 1546
1545 cl = hfsc_classify(skb, sch, &err); 1547 cl = hfsc_classify(skb, sch, &err);
1546 if (cl == NULL) { 1548 if (cl == NULL) {
@@ -1550,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1550 return err; 1552 return err;
1551 } 1553 }
1552 1554
1555 first = !cl->qdisc->q.qlen;
1553 err = qdisc_enqueue(skb, cl->qdisc, to_free); 1556 err = qdisc_enqueue(skb, cl->qdisc, to_free);
1554 if (unlikely(err != NET_XMIT_SUCCESS)) { 1557 if (unlikely(err != NET_XMIT_SUCCESS)) {
1555 if (net_xmit_drop_count(err)) { 1558 if (net_xmit_drop_count(err)) {
@@ -1559,9 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1559 return err; 1562 return err;
1560 } 1563 }
1561 1564
1562 if (cl->qdisc->q.qlen == 1) { 1565 if (first) {
1563 unsigned int len = qdisc_pkt_len(skb);
1564
1565 if (cl->cl_flags & HFSC_RSC) 1566 if (cl->cl_flags & HFSC_RSC)
1566 init_ed(cl, len); 1567 init_ed(cl, len);
1567 if (cl->cl_flags & HFSC_FSC) 1568 if (cl->cl_flags & HFSC_FSC)
@@ -1576,7 +1577,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1576 1577
1577 } 1578 }
1578 1579
1579 qdisc_qstats_backlog_inc(sch, skb); 1580 sch->qstats.backlog += len;
1580 sch->q.qlen++; 1581 sch->q.qlen++;
1581 1582
1582 return NET_XMIT_SUCCESS; 1583 return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 58b449490757..30f9da7e1076 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
581 struct sk_buff **to_free) 581 struct sk_buff **to_free)
582{ 582{
583 int uninitialized_var(ret); 583 int uninitialized_var(ret);
584 unsigned int len = qdisc_pkt_len(skb);
584 struct htb_sched *q = qdisc_priv(sch); 585 struct htb_sched *q = qdisc_priv(sch);
585 struct htb_class *cl = htb_classify(skb, sch, &ret); 586 struct htb_class *cl = htb_classify(skb, sch, &ret);
586 587
@@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
610 htb_activate(q, cl); 611 htb_activate(q, cl);
611 } 612 }
612 613
613 qdisc_qstats_backlog_inc(sch, skb); 614 sch->qstats.backlog += len;
614 sch->q.qlen++; 615 sch->q.qlen++;
615 return NET_XMIT_SUCCESS; 616 return NET_XMIT_SUCCESS;
616} 617}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index cdf68706e40f..847141cd900f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
72static int 72static int
73prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) 73prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
74{ 74{
75 unsigned int len = qdisc_pkt_len(skb);
75 struct Qdisc *qdisc; 76 struct Qdisc *qdisc;
76 int ret; 77 int ret;
77 78
@@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
88 89
89 ret = qdisc_enqueue(skb, qdisc, to_free); 90 ret = qdisc_enqueue(skb, qdisc, to_free);
90 if (ret == NET_XMIT_SUCCESS) { 91 if (ret == NET_XMIT_SUCCESS) {
91 qdisc_qstats_backlog_inc(sch, skb); 92 sch->qstats.backlog += len;
92 sch->q.qlen++; 93 sch->q.qlen++;
93 return NET_XMIT_SUCCESS; 94 return NET_XMIT_SUCCESS;
94 } 95 }
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index dc37c4ead439..29f5c4a24688 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1210,10 +1210,12 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
1210static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, 1210static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1211 struct sk_buff **to_free) 1211 struct sk_buff **to_free)
1212{ 1212{
1213 unsigned int len = qdisc_pkt_len(skb), gso_segs;
1213 struct qfq_sched *q = qdisc_priv(sch); 1214 struct qfq_sched *q = qdisc_priv(sch);
1214 struct qfq_class *cl; 1215 struct qfq_class *cl;
1215 struct qfq_aggregate *agg; 1216 struct qfq_aggregate *agg;
1216 int err = 0; 1217 int err = 0;
1218 bool first;
1217 1219
1218 cl = qfq_classify(skb, sch, &err); 1220 cl = qfq_classify(skb, sch, &err);
1219 if (cl == NULL) { 1221 if (cl == NULL) {
@@ -1224,17 +1226,18 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1224 } 1226 }
1225 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); 1227 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
1226 1228
1227 if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) { 1229 if (unlikely(cl->agg->lmax < len)) {
1228 pr_debug("qfq: increasing maxpkt from %u to %u for class %u", 1230 pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
1229 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); 1231 cl->agg->lmax, len, cl->common.classid);
1230 err = qfq_change_agg(sch, cl, cl->agg->class_weight, 1232 err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
1231 qdisc_pkt_len(skb));
1232 if (err) { 1233 if (err) {
1233 cl->qstats.drops++; 1234 cl->qstats.drops++;
1234 return qdisc_drop(skb, sch, to_free); 1235 return qdisc_drop(skb, sch, to_free);
1235 } 1236 }
1236 } 1237 }
1237 1238
1239 gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
1240 first = !cl->qdisc->q.qlen;
1238 err = qdisc_enqueue(skb, cl->qdisc, to_free); 1241 err = qdisc_enqueue(skb, cl->qdisc, to_free);
1239 if (unlikely(err != NET_XMIT_SUCCESS)) { 1242 if (unlikely(err != NET_XMIT_SUCCESS)) {
1240 pr_debug("qfq_enqueue: enqueue failed %d\n", err); 1243 pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1245,16 +1248,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1245 return err; 1248 return err;
1246 } 1249 }
1247 1250
1248 bstats_update(&cl->bstats, skb); 1251 cl->bstats.bytes += len;
1249 qdisc_qstats_backlog_inc(sch, skb); 1252 cl->bstats.packets += gso_segs;
1253 sch->qstats.backlog += len;
1250 ++sch->q.qlen; 1254 ++sch->q.qlen;
1251 1255
1252 agg = cl->agg; 1256 agg = cl->agg;
1253 /* if the queue was not empty, then done here */ 1257 /* if the queue was not empty, then done here */
1254 if (cl->qdisc->q.qlen != 1) { 1258 if (!first) {
1255 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && 1259 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
1256 list_first_entry(&agg->active, struct qfq_class, alist) 1260 list_first_entry(&agg->active, struct qfq_class, alist)
1257 == cl && cl->deficit < qdisc_pkt_len(skb)) 1261 == cl && cl->deficit < len)
1258 list_move_tail(&cl->alist, &agg->active); 1262 list_move_tail(&cl->alist, &agg->active);
1259 1263
1260 return err; 1264 return err;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 942dcca09cf2..7f272a9070c5 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
185 struct sk_buff **to_free) 185 struct sk_buff **to_free)
186{ 186{
187 struct tbf_sched_data *q = qdisc_priv(sch); 187 struct tbf_sched_data *q = qdisc_priv(sch);
188 unsigned int len = qdisc_pkt_len(skb);
188 int ret; 189 int ret;
189 190
190 if (qdisc_pkt_len(skb) > q->max_size) { 191 if (qdisc_pkt_len(skb) > q->max_size) {
@@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
200 return ret; 201 return ret;
201 } 202 }
202 203
203 qdisc_qstats_backlog_inc(sch, skb); 204 sch->qstats.backlog += len;
204 sch->q.qlen++; 205 sch->q.qlen++;
205 return NET_XMIT_SUCCESS; 206 return NET_XMIT_SUCCESS;
206} 207}
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 078f01a8d582..435847d98b51 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
256 + nla_total_size(1) /* INET_DIAG_TOS */ 256 + nla_total_size(1) /* INET_DIAG_TOS */
257 + nla_total_size(1) /* INET_DIAG_TCLASS */ 257 + nla_total_size(1) /* INET_DIAG_TCLASS */
258 + nla_total_size(4) /* INET_DIAG_MARK */ 258 + nla_total_size(4) /* INET_DIAG_MARK */
259 + nla_total_size(4) /* INET_DIAG_CLASS_ID */
259 + nla_total_size(addrlen * asoc->peer.transport_count) 260 + nla_total_size(addrlen * asoc->peer.transport_count)
260 + nla_total_size(addrlen * addrcnt) 261 + nla_total_size(addrlen * addrcnt)
261 + nla_total_size(sizeof(struct inet_diag_meminfo)) 262 + nla_total_size(sizeof(struct inet_diag_meminfo))
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index b9ed271b7ef7..6200cd2b4b99 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
97 97
98 switch (ev) { 98 switch (ev) {
99 case NETDEV_UP: 99 case NETDEV_UP:
100 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 100 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
101 if (addr) { 101 if (addr) {
102 addr->a.v6.sin6_family = AF_INET6; 102 addr->a.v6.sin6_family = AF_INET6;
103 addr->a.v6.sin6_port = 0;
104 addr->a.v6.sin6_flowinfo = 0;
105 addr->a.v6.sin6_addr = ifa->addr; 103 addr->a.v6.sin6_addr = ifa->addr;
106 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; 104 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
107 addr->valid = 1; 105 addr->valid = 1;
@@ -282,7 +280,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
282 280
283 if (saddr) { 281 if (saddr) {
284 fl6->saddr = saddr->v6.sin6_addr; 282 fl6->saddr = saddr->v6.sin6_addr;
285 fl6->fl6_sport = saddr->v6.sin6_port; 283 if (!fl6->fl6_sport)
284 fl6->fl6_sport = saddr->v6.sin6_port;
286 285
287 pr_debug("src=%pI6 - ", &fl6->saddr); 286 pr_debug("src=%pI6 - ", &fl6->saddr);
288 } 287 }
@@ -434,7 +433,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
434 addr = kzalloc(sizeof(*addr), GFP_ATOMIC); 433 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
435 if (addr) { 434 if (addr) {
436 addr->a.v6.sin6_family = AF_INET6; 435 addr->a.v6.sin6_family = AF_INET6;
437 addr->a.v6.sin6_port = 0;
438 addr->a.v6.sin6_addr = ifp->addr; 436 addr->a.v6.sin6_addr = ifp->addr;
439 addr->a.v6.sin6_scope_id = dev->ifindex; 437 addr->a.v6.sin6_scope_id = dev->ifindex;
440 addr->valid = 1; 438 addr->valid = 1;
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 123e9f2dc226..edfcf16e704c 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
36{ 36{
37 skb->ip_summed = CHECKSUM_NONE; 37 skb->ip_summed = CHECKSUM_NONE;
38 skb->csum_not_inet = 0; 38 skb->csum_not_inet = 0;
39 gso_reset_checksum(skb, ~0);
39 return sctp_compute_cksum(skb, skb_transport_offset(skb)); 40 return sctp_compute_cksum(skb, skb_transport_offset(skb));
40} 41}
41 42
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d5878ae55840..6abc8b274270 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
101 addr = kzalloc(sizeof(*addr), GFP_ATOMIC); 101 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
102 if (addr) { 102 if (addr) {
103 addr->a.v4.sin_family = AF_INET; 103 addr->a.v4.sin_family = AF_INET;
104 addr->a.v4.sin_port = 0;
105 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 104 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
106 addr->valid = 1; 105 addr->valid = 1;
107 INIT_LIST_HEAD(&addr->list); 106 INIT_LIST_HEAD(&addr->list);
@@ -441,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
441 } 440 }
442 if (saddr) { 441 if (saddr) {
443 fl4->saddr = saddr->v4.sin_addr.s_addr; 442 fl4->saddr = saddr->v4.sin_addr.s_addr;
444 fl4->fl4_sport = saddr->v4.sin_port; 443 if (!fl4->fl4_sport)
444 fl4->fl4_sport = saddr->v4.sin_port;
445 } 445 }
446 446
447 pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr, 447 pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
@@ -776,10 +776,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
776 776
777 switch (ev) { 777 switch (ev) {
778 case NETDEV_UP: 778 case NETDEV_UP:
779 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 779 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
780 if (addr) { 780 if (addr) {
781 addr->a.v4.sin_family = AF_INET; 781 addr->a.v4.sin_family = AF_INET;
782 addr->a.v4.sin_port = 0;
783 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 782 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
784 addr->valid = 1; 783 addr->valid = 1;
785 spin_lock_bh(&net->sctp.local_addr_lock); 784 spin_lock_bh(&net->sctp.local_addr_lock);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f4ac6c592e13..d05c57664e36 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
495 * 495 *
496 * [INIT ACK back to where the INIT came from.] 496 * [INIT ACK back to where the INIT came from.]
497 */ 497 */
498 retval->transport = chunk->transport; 498 if (chunk->transport)
499 retval->transport =
500 sctp_assoc_lookup_paddr(asoc,
501 &chunk->transport->ipaddr);
499 502
500 retval->subh.init_hdr = 503 retval->subh.init_hdr =
501 sctp_addto_chunk(retval, sizeof(initack), &initack); 504 sctp_addto_chunk(retval, sizeof(initack), &initack);
@@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
642 * 645 *
643 * [COOKIE ACK back to where the COOKIE ECHO came from.] 646 * [COOKIE ACK back to where the COOKIE ECHO came from.]
644 */ 647 */
645 if (retval && chunk) 648 if (retval && chunk && chunk->transport)
646 retval->transport = chunk->transport; 649 retval->transport =
650 sctp_assoc_lookup_paddr(asoc,
651 &chunk->transport->ipaddr);
647 652
648 return retval; 653 return retval;
649} 654}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f93c3cf9e567..65d6d04546ae 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
2027 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 2027 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
2028 struct sctp_transport *transport = NULL; 2028 struct sctp_transport *transport = NULL;
2029 struct sctp_sndrcvinfo _sinfo, *sinfo; 2029 struct sctp_sndrcvinfo _sinfo, *sinfo;
2030 struct sctp_association *asoc; 2030 struct sctp_association *asoc, *tmp;
2031 struct sctp_cmsgs cmsgs; 2031 struct sctp_cmsgs cmsgs;
2032 union sctp_addr *daddr; 2032 union sctp_addr *daddr;
2033 bool new = false; 2033 bool new = false;
@@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
2053 2053
2054 /* SCTP_SENDALL process */ 2054 /* SCTP_SENDALL process */
2055 if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { 2055 if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
2056 list_for_each_entry(asoc, &ep->asocs, asocs) { 2056 list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
2057 err = sctp_sendmsg_check_sflags(asoc, sflags, msg, 2057 err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
2058 msg_len); 2058 msg_len);
2059 if (err == 0) 2059 if (err == 0)
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 3892e7630f3a..2936ed17bf9e 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count)
84 } 84 }
85} 85}
86 86
87static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
88{
89 size_t index = 0;
90
91 while (count--) {
92 if (elem == flex_array_get(fa, index))
93 break;
94 index++;
95 }
96
97 return index;
98}
99
87/* Migrates chunks from stream queues to new stream queues if needed, 100/* Migrates chunks from stream queues to new stream queues if needed,
88 * but not across associations. Also, removes those chunks to streams 101 * but not across associations. Also, removes those chunks to streams
89 * higher than the new max. 102 * higher than the new max.
@@ -131,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
131 } 144 }
132 } 145 }
133 146
134 for (i = outcnt; i < stream->outcnt; i++) 147 for (i = outcnt; i < stream->outcnt; i++) {
135 kfree(SCTP_SO(stream, i)->ext); 148 kfree(SCTP_SO(stream, i)->ext);
149 SCTP_SO(stream, i)->ext = NULL;
150 }
136} 151}
137 152
138static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, 153static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
@@ -147,6 +162,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
147 162
148 if (stream->out) { 163 if (stream->out) {
149 fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); 164 fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
165 if (stream->out_curr) {
166 size_t index = fa_index(stream->out, stream->out_curr,
167 stream->outcnt);
168
169 BUG_ON(index == stream->outcnt);
170 stream->out_curr = flex_array_get(out, index);
171 }
150 fa_free(stream->out); 172 fa_free(stream->out);
151 } 173 }
152 174
@@ -585,9 +607,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
585 struct sctp_strreset_outreq *outreq = param.v; 607 struct sctp_strreset_outreq *outreq = param.v;
586 struct sctp_stream *stream = &asoc->stream; 608 struct sctp_stream *stream = &asoc->stream;
587 __u32 result = SCTP_STRRESET_DENIED; 609 __u32 result = SCTP_STRRESET_DENIED;
588 __u16 i, nums, flags = 0;
589 __be16 *str_p = NULL; 610 __be16 *str_p = NULL;
590 __u32 request_seq; 611 __u32 request_seq;
612 __u16 i, nums;
591 613
592 request_seq = ntohl(outreq->request_seq); 614 request_seq = ntohl(outreq->request_seq);
593 615
@@ -615,6 +637,15 @@ struct sctp_chunk *sctp_process_strreset_outreq(
615 if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) 637 if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
616 goto out; 638 goto out;
617 639
640 nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
641 str_p = outreq->list_of_streams;
642 for (i = 0; i < nums; i++) {
643 if (ntohs(str_p[i]) >= stream->incnt) {
644 result = SCTP_STRRESET_ERR_WRONG_SSN;
645 goto out;
646 }
647 }
648
618 if (asoc->strreset_chunk) { 649 if (asoc->strreset_chunk) {
619 if (!sctp_chunk_lookup_strreset_param( 650 if (!sctp_chunk_lookup_strreset_param(
620 asoc, outreq->response_seq, 651 asoc, outreq->response_seq,
@@ -637,32 +668,19 @@ struct sctp_chunk *sctp_process_strreset_outreq(
637 sctp_chunk_put(asoc->strreset_chunk); 668 sctp_chunk_put(asoc->strreset_chunk);
638 asoc->strreset_chunk = NULL; 669 asoc->strreset_chunk = NULL;
639 } 670 }
640
641 flags = SCTP_STREAM_RESET_INCOMING_SSN;
642 } 671 }
643 672
644 nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); 673 if (nums)
645 if (nums) {
646 str_p = outreq->list_of_streams;
647 for (i = 0; i < nums; i++) {
648 if (ntohs(str_p[i]) >= stream->incnt) {
649 result = SCTP_STRRESET_ERR_WRONG_SSN;
650 goto out;
651 }
652 }
653
654 for (i = 0; i < nums; i++) 674 for (i = 0; i < nums; i++)
655 SCTP_SI(stream, ntohs(str_p[i]))->mid = 0; 675 SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
656 } else { 676 else
657 for (i = 0; i < stream->incnt; i++) 677 for (i = 0; i < stream->incnt; i++)
658 SCTP_SI(stream, i)->mid = 0; 678 SCTP_SI(stream, i)->mid = 0;
659 }
660 679
661 result = SCTP_STRRESET_PERFORMED; 680 result = SCTP_STRRESET_PERFORMED;
662 681
663 *evp = sctp_ulpevent_make_stream_reset_event(asoc, 682 *evp = sctp_ulpevent_make_stream_reset_event(asoc,
664 flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p, 683 SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
665 GFP_ATOMIC);
666 684
667out: 685out:
668 sctp_update_strreset_result(asoc, result); 686 sctp_update_strreset_result(asoc, result);
@@ -738,9 +756,6 @@ struct sctp_chunk *sctp_process_strreset_inreq(
738 756
739 result = SCTP_STRRESET_PERFORMED; 757 result = SCTP_STRRESET_PERFORMED;
740 758
741 *evp = sctp_ulpevent_make_stream_reset_event(asoc,
742 SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
743
744out: 759out:
745 sctp_update_strreset_result(asoc, result); 760 sctp_update_strreset_result(asoc, result);
746err: 761err:
@@ -873,6 +888,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
873 if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) 888 if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
874 goto out; 889 goto out;
875 890
891 in = ntohs(addstrm->number_of_streams);
892 incnt = stream->incnt + in;
893 if (!in || incnt > SCTP_MAX_STREAM)
894 goto out;
895
896 if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
897 goto out;
898
876 if (asoc->strreset_chunk) { 899 if (asoc->strreset_chunk) {
877 if (!sctp_chunk_lookup_strreset_param( 900 if (!sctp_chunk_lookup_strreset_param(
878 asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) { 901 asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
@@ -896,14 +919,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
896 } 919 }
897 } 920 }
898 921
899 in = ntohs(addstrm->number_of_streams);
900 incnt = stream->incnt + in;
901 if (!in || incnt > SCTP_MAX_STREAM)
902 goto out;
903
904 if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
905 goto out;
906
907 stream->incnt = incnt; 922 stream->incnt = incnt;
908 923
909 result = SCTP_STRRESET_PERFORMED; 924 result = SCTP_STRRESET_PERFORMED;
@@ -973,9 +988,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
973 988
974 result = SCTP_STRRESET_PERFORMED; 989 result = SCTP_STRRESET_PERFORMED;
975 990
976 *evp = sctp_ulpevent_make_stream_change_event(asoc,
977 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
978
979out: 991out:
980 sctp_update_strreset_result(asoc, result); 992 sctp_update_strreset_result(asoc, result);
981err: 993err:
@@ -1036,10 +1048,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
1036 sout->mid_uo = 0; 1048 sout->mid_uo = 0;
1037 } 1049 }
1038 } 1050 }
1039
1040 flags = SCTP_STREAM_RESET_OUTGOING_SSN;
1041 } 1051 }
1042 1052
1053 flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
1054
1043 for (i = 0; i < stream->outcnt; i++) 1055 for (i = 0; i < stream->outcnt; i++)
1044 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; 1056 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
1045 1057
@@ -1058,6 +1070,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
1058 nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 1070 nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
1059 sizeof(__u16); 1071 sizeof(__u16);
1060 1072
1073 flags |= SCTP_STREAM_RESET_INCOMING_SSN;
1074
1061 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, 1075 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
1062 nums, str_p, GFP_ATOMIC); 1076 nums, str_p, GFP_ATOMIC);
1063 } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) { 1077 } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 033696e6f74f..ad158d311ffa 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
207 207
208 /* When a data chunk is sent, reset the heartbeat interval. */ 208 /* When a data chunk is sent, reset the heartbeat interval. */
209 expires = jiffies + sctp_transport_timeout(transport); 209 expires = jiffies + sctp_transport_timeout(transport);
210 if (time_before(transport->hb_timer.expires, expires) && 210 if ((time_before(transport->hb_timer.expires, expires) ||
211 !timer_pending(&transport->hb_timer)) &&
211 !mod_timer(&transport->hb_timer, 212 !mod_timer(&transport->hb_timer,
212 expires + prandom_u32_max(transport->rto))) 213 expires + prandom_u32_max(transport->rto)))
213 sctp_transport_hold(transport); 214 sctp_transport_hold(transport);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index c4da4a78d369..b04a813fc865 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -146,6 +146,9 @@ static int smc_release(struct socket *sock)
146 sock_set_flag(sk, SOCK_DEAD); 146 sock_set_flag(sk, SOCK_DEAD);
147 sk->sk_shutdown |= SHUTDOWN_MASK; 147 sk->sk_shutdown |= SHUTDOWN_MASK;
148 } 148 }
149
150 sk->sk_prot->unhash(sk);
151
149 if (smc->clcsock) { 152 if (smc->clcsock) {
150 if (smc->use_fallback && sk->sk_state == SMC_LISTEN) { 153 if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
151 /* wake up clcsock accept */ 154 /* wake up clcsock accept */
@@ -170,7 +173,6 @@ static int smc_release(struct socket *sock)
170 smc_conn_free(&smc->conn); 173 smc_conn_free(&smc->conn);
171 release_sock(sk); 174 release_sock(sk);
172 175
173 sk->sk_prot->unhash(sk);
174 sock_put(sk); /* final sock_put */ 176 sock_put(sk); /* final sock_put */
175out: 177out:
176 return rc; 178 return rc;
@@ -1503,6 +1505,11 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1503 1505
1504 smc = smc_sk(sk); 1506 smc = smc_sk(sk);
1505 lock_sock(sk); 1507 lock_sock(sk);
1508 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
1509 /* socket was connected before, no more data to read */
1510 rc = 0;
1511 goto out;
1512 }
1506 if ((sk->sk_state == SMC_INIT) || 1513 if ((sk->sk_state == SMC_INIT) ||
1507 (sk->sk_state == SMC_LISTEN) || 1514 (sk->sk_state == SMC_LISTEN) ||
1508 (sk->sk_state == SMC_CLOSED)) 1515 (sk->sk_state == SMC_CLOSED))
@@ -1838,7 +1845,11 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
1838 1845
1839 smc = smc_sk(sk); 1846 smc = smc_sk(sk);
1840 lock_sock(sk); 1847 lock_sock(sk);
1841 1848 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
1849 /* socket was connected before, no more data to read */
1850 rc = 0;
1851 goto out;
1852 }
1842 if (sk->sk_state == SMC_INIT || 1853 if (sk->sk_state == SMC_INIT ||
1843 sk->sk_state == SMC_LISTEN || 1854 sk->sk_state == SMC_LISTEN ||
1844 sk->sk_state == SMC_CLOSED) 1855 sk->sk_state == SMC_CLOSED)
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5721416d0605..adbdf195eb08 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */
113} __aligned(8); 113} __aligned(8);
114 114
115enum smc_urg_state { 115enum smc_urg_state {
116 SMC_URG_VALID, /* data present */ 116 SMC_URG_VALID = 1, /* data present */
117 SMC_URG_NOTYET, /* data pending */ 117 SMC_URG_NOTYET = 2, /* data pending */
118 SMC_URG_READ /* data was already read */ 118 SMC_URG_READ = 3, /* data was already read */
119}; 119};
120 120
121struct smc_connection { 121struct smc_connection {
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index db83332ac1c8..fb07ad8d69a6 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -21,13 +21,6 @@
21 21
22/********************************** send *************************************/ 22/********************************** send *************************************/
23 23
24struct smc_cdc_tx_pend {
25 struct smc_connection *conn; /* socket connection */
26 union smc_host_cursor cursor; /* tx sndbuf cursor sent */
27 union smc_host_cursor p_cursor; /* rx RMBE cursor produced */
28 u16 ctrl_seq; /* conn. tx sequence # */
29};
30
31/* handler for send/transmission completion of a CDC msg */ 24/* handler for send/transmission completion of a CDC msg */
32static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, 25static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
33 struct smc_link *link, 26 struct smc_link *link,
@@ -61,12 +54,14 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
61 54
62int smc_cdc_get_free_slot(struct smc_connection *conn, 55int smc_cdc_get_free_slot(struct smc_connection *conn,
63 struct smc_wr_buf **wr_buf, 56 struct smc_wr_buf **wr_buf,
57 struct smc_rdma_wr **wr_rdma_buf,
64 struct smc_cdc_tx_pend **pend) 58 struct smc_cdc_tx_pend **pend)
65{ 59{
66 struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; 60 struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
67 int rc; 61 int rc;
68 62
69 rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, 63 rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
64 wr_rdma_buf,
70 (struct smc_wr_tx_pend_priv **)pend); 65 (struct smc_wr_tx_pend_priv **)pend);
71 if (!conn->alert_token_local) 66 if (!conn->alert_token_local)
72 /* abnormal termination */ 67 /* abnormal termination */
@@ -96,6 +91,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
96 struct smc_wr_buf *wr_buf, 91 struct smc_wr_buf *wr_buf,
97 struct smc_cdc_tx_pend *pend) 92 struct smc_cdc_tx_pend *pend)
98{ 93{
94 union smc_host_cursor cfed;
99 struct smc_link *link; 95 struct smc_link *link;
100 int rc; 96 int rc;
101 97
@@ -105,12 +101,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
105 101
106 conn->tx_cdc_seq++; 102 conn->tx_cdc_seq++;
107 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; 103 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
108 smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, 104 smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
109 &conn->local_tx_ctrl, conn);
110 rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); 105 rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
111 if (!rc) 106 if (!rc)
112 smc_curs_copy(&conn->rx_curs_confirmed, 107 smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
113 &conn->local_tx_ctrl.cons, conn);
114 108
115 return rc; 109 return rc;
116} 110}
@@ -121,11 +115,14 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
121 struct smc_wr_buf *wr_buf; 115 struct smc_wr_buf *wr_buf;
122 int rc; 116 int rc;
123 117
124 rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); 118 rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend);
125 if (rc) 119 if (rc)
126 return rc; 120 return rc;
127 121
128 return smc_cdc_msg_send(conn, wr_buf, pend); 122 spin_lock_bh(&conn->send_lock);
123 rc = smc_cdc_msg_send(conn, wr_buf, pend);
124 spin_unlock_bh(&conn->send_lock);
125 return rc;
129} 126}
130 127
131int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) 128int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index b5bfe38c7f9b..f1cdde9d4b89 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -160,7 +160,9 @@ static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
160#endif 160#endif
161} 161}
162 162
163/* calculate cursor difference between old and new, where old <= new */ 163/* calculate cursor difference between old and new, where old <= new and
164 * difference cannot exceed size
165 */
164static inline int smc_curs_diff(unsigned int size, 166static inline int smc_curs_diff(unsigned int size,
165 union smc_host_cursor *old, 167 union smc_host_cursor *old,
166 union smc_host_cursor *new) 168 union smc_host_cursor *new)
@@ -185,28 +187,51 @@ static inline int smc_curs_comp(unsigned int size,
185 return smc_curs_diff(size, old, new); 187 return smc_curs_diff(size, old, new);
186} 188}
187 189
190/* calculate cursor difference between old and new, where old <= new and
191 * difference may exceed size
192 */
193static inline int smc_curs_diff_large(unsigned int size,
194 union smc_host_cursor *old,
195 union smc_host_cursor *new)
196{
197 if (old->wrap < new->wrap)
198 return min_t(int,
199 (size - old->count) + new->count +
200 (new->wrap - old->wrap - 1) * size,
201 size);
202
203 if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */
204 return min_t(int,
205 (size - old->count) + new->count +
206 (new->wrap + 0xffff - old->wrap) * size,
207 size);
208
209 return max_t(int, 0, (new->count - old->count));
210}
211
188static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, 212static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
189 union smc_host_cursor *local, 213 union smc_host_cursor *local,
214 union smc_host_cursor *save,
190 struct smc_connection *conn) 215 struct smc_connection *conn)
191{ 216{
192 union smc_host_cursor temp; 217 smc_curs_copy(save, local, conn);
193 218 peer->count = htonl(save->count);
194 smc_curs_copy(&temp, local, conn); 219 peer->wrap = htons(save->wrap);
195 peer->count = htonl(temp.count);
196 peer->wrap = htons(temp.wrap);
197 /* peer->reserved = htons(0); must be ensured by caller */ 220 /* peer->reserved = htons(0); must be ensured by caller */
198} 221}
199 222
200static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, 223static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
201 struct smc_host_cdc_msg *local, 224 struct smc_connection *conn,
202 struct smc_connection *conn) 225 union smc_host_cursor *save)
203{ 226{
227 struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;
228
204 peer->common.type = local->common.type; 229 peer->common.type = local->common.type;
205 peer->len = local->len; 230 peer->len = local->len;
206 peer->seqno = htons(local->seqno); 231 peer->seqno = htons(local->seqno);
207 peer->token = htonl(local->token); 232 peer->token = htonl(local->token);
208 smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn); 233 smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn);
209 smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn); 234 smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn);
210 peer->prod_flags = local->prod_flags; 235 peer->prod_flags = local->prod_flags;
211 peer->conn_state_flags = local->conn_state_flags; 236 peer->conn_state_flags = local->conn_state_flags;
212} 237}
@@ -270,10 +295,16 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
270 smcr_cdc_msg_to_host(local, peer, conn); 295 smcr_cdc_msg_to_host(local, peer, conn);
271} 296}
272 297
273struct smc_cdc_tx_pend; 298struct smc_cdc_tx_pend {
299 struct smc_connection *conn; /* socket connection */
300 union smc_host_cursor cursor; /* tx sndbuf cursor sent */
301 union smc_host_cursor p_cursor; /* rx RMBE cursor produced */
302 u16 ctrl_seq; /* conn. tx sequence # */
303};
274 304
275int smc_cdc_get_free_slot(struct smc_connection *conn, 305int smc_cdc_get_free_slot(struct smc_connection *conn,
276 struct smc_wr_buf **wr_buf, 306 struct smc_wr_buf **wr_buf,
307 struct smc_rdma_wr **wr_rdma_buf,
277 struct smc_cdc_tx_pend **pend); 308 struct smc_cdc_tx_pend **pend);
278void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); 309void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
279int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, 310int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 776e9dfc915d..d53fd588d1f5 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
378 vec.iov_len = sizeof(struct smc_clc_msg_decline); 378 vec.iov_len = sizeof(struct smc_clc_msg_decline);
379 len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, 379 len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
380 sizeof(struct smc_clc_msg_decline)); 380 sizeof(struct smc_clc_msg_decline));
381 if (len < sizeof(struct smc_clc_msg_decline)) 381 if (len < 0 || len < sizeof(struct smc_clc_msg_decline))
382 len = -EPROTO; 382 len = -EPROTO;
383 return len > 0 ? 0 : len; 383 return len > 0 ? 0 : len;
384} 384}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index ea2b87f29469..e39cadda1bf5 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -345,14 +345,7 @@ static void smc_close_passive_work(struct work_struct *work)
345 345
346 switch (sk->sk_state) { 346 switch (sk->sk_state) {
347 case SMC_INIT: 347 case SMC_INIT:
348 if (atomic_read(&conn->bytes_to_rcv) || 348 sk->sk_state = SMC_APPCLOSEWAIT1;
349 (rxflags->peer_done_writing &&
350 !smc_cdc_rxed_any_close(conn))) {
351 sk->sk_state = SMC_APPCLOSEWAIT1;
352 } else {
353 sk->sk_state = SMC_CLOSED;
354 sock_put(sk); /* passive closing */
355 }
356 break; 349 break;
357 case SMC_ACTIVE: 350 case SMC_ACTIVE:
358 sk->sk_state = SMC_APPCLOSEWAIT1; 351 sk->sk_state = SMC_APPCLOSEWAIT1;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 35c1cdc93e1c..aa1c551cee81 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -128,6 +128,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
128{ 128{
129 struct smc_link_group *lgr = conn->lgr; 129 struct smc_link_group *lgr = conn->lgr;
130 130
131 if (!lgr)
132 return;
131 write_lock_bh(&lgr->conns_lock); 133 write_lock_bh(&lgr->conns_lock);
132 if (conn->alert_token_local) { 134 if (conn->alert_token_local) {
133 __smc_lgr_unregister_conn(conn); 135 __smc_lgr_unregister_conn(conn);
@@ -300,13 +302,13 @@ static void smc_buf_unuse(struct smc_connection *conn,
300 conn->sndbuf_desc->used = 0; 302 conn->sndbuf_desc->used = 0;
301 if (conn->rmb_desc) { 303 if (conn->rmb_desc) {
302 if (!conn->rmb_desc->regerr) { 304 if (!conn->rmb_desc->regerr) {
303 conn->rmb_desc->used = 0;
304 if (!lgr->is_smcd) { 305 if (!lgr->is_smcd) {
305 /* unregister rmb with peer */ 306 /* unregister rmb with peer */
306 smc_llc_do_delete_rkey( 307 smc_llc_do_delete_rkey(
307 &lgr->lnk[SMC_SINGLE_LINK], 308 &lgr->lnk[SMC_SINGLE_LINK],
308 conn->rmb_desc); 309 conn->rmb_desc);
309 } 310 }
311 conn->rmb_desc->used = 0;
310 } else { 312 } else {
311 /* buf registration failed, reuse not possible */ 313 /* buf registration failed, reuse not possible */
312 write_lock_bh(&lgr->rmbs_lock); 314 write_lock_bh(&lgr->rmbs_lock);
@@ -628,6 +630,8 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
628 local_contact = SMC_REUSE_CONTACT; 630 local_contact = SMC_REUSE_CONTACT;
629 conn->lgr = lgr; 631 conn->lgr = lgr;
630 smc_lgr_register_conn(conn); /* add smc conn to lgr */ 632 smc_lgr_register_conn(conn); /* add smc conn to lgr */
633 if (delayed_work_pending(&lgr->free_work))
634 cancel_delayed_work(&lgr->free_work);
631 write_unlock_bh(&lgr->conns_lock); 635 write_unlock_bh(&lgr->conns_lock);
632 break; 636 break;
633 } 637 }
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index b00287989a3d..8806d2afa6ed 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -52,6 +52,24 @@ enum smc_wr_reg_state {
52 FAILED /* ib_wr_reg_mr response: failure */ 52 FAILED /* ib_wr_reg_mr response: failure */
53}; 53};
54 54
55struct smc_rdma_sge { /* sges for RDMA writes */
56 struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE];
57};
58
59#define SMC_MAX_RDMA_WRITES 2 /* max. # of RDMA writes per
60 * message send
61 */
62
63struct smc_rdma_sges { /* sges per message send */
64 struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES];
65};
66
67struct smc_rdma_wr { /* work requests per message
68 * send
69 */
70 struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES];
71};
72
55struct smc_link { 73struct smc_link {
56 struct smc_ib_device *smcibdev; /* ib-device */ 74 struct smc_ib_device *smcibdev; /* ib-device */
57 u8 ibport; /* port - values 1 | 2 */ 75 u8 ibport; /* port - values 1 | 2 */
@@ -64,6 +82,8 @@ struct smc_link {
64 struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ 82 struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */
65 struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ 83 struct ib_send_wr *wr_tx_ibs; /* WR send meta data */
66 struct ib_sge *wr_tx_sges; /* WR send gather meta data */ 84 struct ib_sge *wr_tx_sges; /* WR send gather meta data */
85 struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/
86 struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */
67 struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ 87 struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */
68 /* above four vectors have wr_tx_cnt elements and use the same index */ 88 /* above four vectors have wr_tx_cnt elements and use the same index */
69 dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ 89 dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index e519ef29c0ff..76487a16934e 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -289,8 +289,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
289 289
290static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) 290static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
291{ 291{
292 struct smc_ib_device *smcibdev = 292 struct smc_link *lnk = (struct smc_link *)priv;
293 (struct smc_ib_device *)ibevent->device; 293 struct smc_ib_device *smcibdev = lnk->smcibdev;
294 u8 port_idx; 294 u8 port_idx;
295 295
296 switch (ibevent->event) { 296 switch (ibevent->event) {
@@ -298,7 +298,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
298 case IB_EVENT_GID_CHANGE: 298 case IB_EVENT_GID_CHANGE:
299 case IB_EVENT_PORT_ERR: 299 case IB_EVENT_PORT_ERR:
300 case IB_EVENT_QP_ACCESS_ERR: 300 case IB_EVENT_QP_ACCESS_ERR:
301 port_idx = ibevent->element.port_num - 1; 301 port_idx = ibevent->element.qp->port - 1;
302 set_bit(port_idx, &smcibdev->port_event_mask); 302 set_bit(port_idx, &smcibdev->port_event_mask);
303 schedule_work(&smcibdev->port_event_work); 303 schedule_work(&smcibdev->port_event_work);
304 break; 304 break;
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index a6d3623d06f4..4fd60c522802 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -166,7 +166,8 @@ static int smc_llc_add_pending_send(struct smc_link *link,
166{ 166{
167 int rc; 167 int rc;
168 168
169 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend); 169 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
170 pend);
170 if (rc < 0) 171 if (rc < 0)
171 return rc; 172 return rc;
172 BUILD_BUG_ON_MSG( 173 BUILD_BUG_ON_MSG(
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 7cb3e4f07c10..632c3109dee5 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -27,7 +27,7 @@
27static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { 27static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
28 [SMC_PNETID_NAME] = { 28 [SMC_PNETID_NAME] = {
29 .type = NLA_NUL_STRING, 29 .type = NLA_NUL_STRING,
30 .len = SMC_MAX_PNETID_LEN - 1 30 .len = SMC_MAX_PNETID_LEN
31 }, 31 },
32 [SMC_PNETID_ETHNAME] = { 32 [SMC_PNETID_ETHNAME] = {
33 .type = NLA_NUL_STRING, 33 .type = NLA_NUL_STRING,
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index d8366ed51757..f93f3580c100 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -165,12 +165,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
165 conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; 165 conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;
166 166
167 if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { 167 if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
168 if (send_done)
169 return send_done;
168 rc = smc_tx_wait(smc, msg->msg_flags); 170 rc = smc_tx_wait(smc, msg->msg_flags);
169 if (rc) { 171 if (rc)
170 if (send_done)
171 return send_done;
172 goto out_err; 172 goto out_err;
173 }
174 continue; 173 continue;
175 } 174 }
176 175
@@ -267,27 +266,23 @@ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
267 266
268/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ 267/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
269static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, 268static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
270 int num_sges, struct ib_sge sges[]) 269 int num_sges, struct ib_rdma_wr *rdma_wr)
271{ 270{
272 struct smc_link_group *lgr = conn->lgr; 271 struct smc_link_group *lgr = conn->lgr;
273 struct ib_rdma_wr rdma_wr;
274 struct smc_link *link; 272 struct smc_link *link;
275 int rc; 273 int rc;
276 274
277 memset(&rdma_wr, 0, sizeof(rdma_wr));
278 link = &lgr->lnk[SMC_SINGLE_LINK]; 275 link = &lgr->lnk[SMC_SINGLE_LINK];
279 rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link); 276 rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
280 rdma_wr.wr.sg_list = sges; 277 rdma_wr->wr.num_sge = num_sges;
281 rdma_wr.wr.num_sge = num_sges; 278 rdma_wr->remote_addr =
282 rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
283 rdma_wr.remote_addr =
284 lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + 279 lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
285 /* RMBE within RMB */ 280 /* RMBE within RMB */
286 conn->tx_off + 281 conn->tx_off +
287 /* offset within RMBE */ 282 /* offset within RMBE */
288 peer_rmbe_offset; 283 peer_rmbe_offset;
289 rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; 284 rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
290 rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL); 285 rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
291 if (rc) { 286 if (rc) {
292 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; 287 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
293 smc_lgr_terminate(lgr); 288 smc_lgr_terminate(lgr);
@@ -314,24 +309,25 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn,
314/* SMC-R helper for smc_tx_rdma_writes() */ 309/* SMC-R helper for smc_tx_rdma_writes() */
315static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, 310static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
316 size_t src_off, size_t src_len, 311 size_t src_off, size_t src_len,
317 size_t dst_off, size_t dst_len) 312 size_t dst_off, size_t dst_len,
313 struct smc_rdma_wr *wr_rdma_buf)
318{ 314{
319 dma_addr_t dma_addr = 315 dma_addr_t dma_addr =
320 sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); 316 sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
321 struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
322 int src_len_sum = src_len, dst_len_sum = dst_len; 317 int src_len_sum = src_len, dst_len_sum = dst_len;
323 struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
324 int sent_count = src_off; 318 int sent_count = src_off;
325 int srcchunk, dstchunk; 319 int srcchunk, dstchunk;
326 int num_sges; 320 int num_sges;
327 int rc; 321 int rc;
328 322
329 for (dstchunk = 0; dstchunk < 2; dstchunk++) { 323 for (dstchunk = 0; dstchunk < 2; dstchunk++) {
324 struct ib_sge *sge =
325 wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;
326
330 num_sges = 0; 327 num_sges = 0;
331 for (srcchunk = 0; srcchunk < 2; srcchunk++) { 328 for (srcchunk = 0; srcchunk < 2; srcchunk++) {
332 sges[srcchunk].addr = dma_addr + src_off; 329 sge[srcchunk].addr = dma_addr + src_off;
333 sges[srcchunk].length = src_len; 330 sge[srcchunk].length = src_len;
334 sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
335 num_sges++; 331 num_sges++;
336 332
337 src_off += src_len; 333 src_off += src_len;
@@ -344,7 +340,8 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
344 src_len = dst_len - src_len; /* remainder */ 340 src_len = dst_len - src_len; /* remainder */
345 src_len_sum += src_len; 341 src_len_sum += src_len;
346 } 342 }
347 rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); 343 rc = smc_tx_rdma_write(conn, dst_off, num_sges,
344 &wr_rdma_buf->wr_tx_rdma[dstchunk]);
348 if (rc) 345 if (rc)
349 return rc; 346 return rc;
350 if (dst_len_sum == len) 347 if (dst_len_sum == len)
@@ -403,7 +400,8 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
403/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; 400/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
404 * usable snd_wnd as max transmit 401 * usable snd_wnd as max transmit
405 */ 402 */
406static int smc_tx_rdma_writes(struct smc_connection *conn) 403static int smc_tx_rdma_writes(struct smc_connection *conn,
404 struct smc_rdma_wr *wr_rdma_buf)
407{ 405{
408 size_t len, src_len, dst_off, dst_len; /* current chunk values */ 406 size_t len, src_len, dst_off, dst_len; /* current chunk values */
409 union smc_host_cursor sent, prep, prod, cons; 407 union smc_host_cursor sent, prep, prod, cons;
@@ -464,7 +462,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
464 dst_off, dst_len); 462 dst_off, dst_len);
465 else 463 else
466 rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, 464 rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
467 dst_off, dst_len); 465 dst_off, dst_len, wr_rdma_buf);
468 if (rc) 466 if (rc)
469 return rc; 467 return rc;
470 468
@@ -485,31 +483,30 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
485static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 483static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
486{ 484{
487 struct smc_cdc_producer_flags *pflags; 485 struct smc_cdc_producer_flags *pflags;
486 struct smc_rdma_wr *wr_rdma_buf;
488 struct smc_cdc_tx_pend *pend; 487 struct smc_cdc_tx_pend *pend;
489 struct smc_wr_buf *wr_buf; 488 struct smc_wr_buf *wr_buf;
490 int rc; 489 int rc;
491 490
492 spin_lock_bh(&conn->send_lock); 491 rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend);
493 rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
494 if (rc < 0) { 492 if (rc < 0) {
495 if (rc == -EBUSY) { 493 if (rc == -EBUSY) {
496 struct smc_sock *smc = 494 struct smc_sock *smc =
497 container_of(conn, struct smc_sock, conn); 495 container_of(conn, struct smc_sock, conn);
498 496
499 if (smc->sk.sk_err == ECONNABORTED) { 497 if (smc->sk.sk_err == ECONNABORTED)
500 rc = sock_error(&smc->sk); 498 return sock_error(&smc->sk);
501 goto out_unlock;
502 }
503 rc = 0; 499 rc = 0;
504 if (conn->alert_token_local) /* connection healthy */ 500 if (conn->alert_token_local) /* connection healthy */
505 mod_delayed_work(system_wq, &conn->tx_work, 501 mod_delayed_work(system_wq, &conn->tx_work,
506 SMC_TX_WORK_DELAY); 502 SMC_TX_WORK_DELAY);
507 } 503 }
508 goto out_unlock; 504 return rc;
509 } 505 }
510 506
507 spin_lock_bh(&conn->send_lock);
511 if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { 508 if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
512 rc = smc_tx_rdma_writes(conn); 509 rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
513 if (rc) { 510 if (rc) {
514 smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], 511 smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
515 (struct smc_wr_tx_pend_priv *)pend); 512 (struct smc_wr_tx_pend_priv *)pend);
@@ -536,7 +533,7 @@ static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
536 533
537 spin_lock_bh(&conn->send_lock); 534 spin_lock_bh(&conn->send_lock);
538 if (!pflags->urg_data_present) 535 if (!pflags->urg_data_present)
539 rc = smc_tx_rdma_writes(conn); 536 rc = smc_tx_rdma_writes(conn, NULL);
540 if (!rc) 537 if (!rc)
541 rc = smcd_cdc_msg_send(conn); 538 rc = smcd_cdc_msg_send(conn);
542 539
@@ -598,7 +595,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
598 if (to_confirm > conn->rmbe_update_limit) { 595 if (to_confirm > conn->rmbe_update_limit) {
599 smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); 596 smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
600 sender_free = conn->rmb_desc->len - 597 sender_free = conn->rmb_desc->len -
601 smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); 598 smc_curs_diff_large(conn->rmb_desc->len,
599 &cfed, &prod);
602 } 600 }
603 601
604 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || 602 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index c2694750a6a8..253aa75dc2b6 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -160,6 +160,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
160 * @link: Pointer to smc_link used to later send the message. 160 * @link: Pointer to smc_link used to later send the message.
161 * @handler: Send completion handler function pointer. 161 * @handler: Send completion handler function pointer.
162 * @wr_buf: Out value returns pointer to message buffer. 162 * @wr_buf: Out value returns pointer to message buffer.
163 * @wr_rdma_buf: Out value returns pointer to rdma work request.
163 * @wr_pend_priv: Out value returns pointer serving as handler context. 164 * @wr_pend_priv: Out value returns pointer serving as handler context.
164 * 165 *
165 * Return: 0 on success, or -errno on error. 166 * Return: 0 on success, or -errno on error.
@@ -167,6 +168,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
167int smc_wr_tx_get_free_slot(struct smc_link *link, 168int smc_wr_tx_get_free_slot(struct smc_link *link,
168 smc_wr_tx_handler handler, 169 smc_wr_tx_handler handler,
169 struct smc_wr_buf **wr_buf, 170 struct smc_wr_buf **wr_buf,
171 struct smc_rdma_wr **wr_rdma_buf,
170 struct smc_wr_tx_pend_priv **wr_pend_priv) 172 struct smc_wr_tx_pend_priv **wr_pend_priv)
171{ 173{
172 struct smc_wr_tx_pend *wr_pend; 174 struct smc_wr_tx_pend *wr_pend;
@@ -204,6 +206,8 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
204 wr_ib = &link->wr_tx_ibs[idx]; 206 wr_ib = &link->wr_tx_ibs[idx];
205 wr_ib->wr_id = wr_id; 207 wr_ib->wr_id = wr_id;
206 *wr_buf = &link->wr_tx_bufs[idx]; 208 *wr_buf = &link->wr_tx_bufs[idx];
209 if (wr_rdma_buf)
210 *wr_rdma_buf = &link->wr_tx_rdmas[idx];
207 *wr_pend_priv = &wr_pend->priv; 211 *wr_pend_priv = &wr_pend->priv;
208 return 0; 212 return 0;
209} 213}
@@ -218,10 +222,10 @@ int smc_wr_tx_put_slot(struct smc_link *link,
218 u32 idx = pend->idx; 222 u32 idx = pend->idx;
219 223
220 /* clear the full struct smc_wr_tx_pend including .priv */ 224 /* clear the full struct smc_wr_tx_pend including .priv */
221 memset(&link->wr_tx_pends[pend->idx], 0, 225 memset(&link->wr_tx_pends[idx], 0,
222 sizeof(link->wr_tx_pends[pend->idx])); 226 sizeof(link->wr_tx_pends[idx]));
223 memset(&link->wr_tx_bufs[pend->idx], 0, 227 memset(&link->wr_tx_bufs[idx], 0,
224 sizeof(link->wr_tx_bufs[pend->idx])); 228 sizeof(link->wr_tx_bufs[idx]));
225 test_and_clear_bit(idx, link->wr_tx_mask); 229 test_and_clear_bit(idx, link->wr_tx_mask);
226 return 1; 230 return 1;
227 } 231 }
@@ -465,12 +469,26 @@ static void smc_wr_init_sge(struct smc_link *lnk)
465 lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; 469 lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
466 lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; 470 lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
467 lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; 471 lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
472 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
473 lnk->roce_pd->local_dma_lkey;
474 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
475 lnk->roce_pd->local_dma_lkey;
476 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
477 lnk->roce_pd->local_dma_lkey;
478 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
479 lnk->roce_pd->local_dma_lkey;
468 lnk->wr_tx_ibs[i].next = NULL; 480 lnk->wr_tx_ibs[i].next = NULL;
469 lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; 481 lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
470 lnk->wr_tx_ibs[i].num_sge = 1; 482 lnk->wr_tx_ibs[i].num_sge = 1;
471 lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; 483 lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
472 lnk->wr_tx_ibs[i].send_flags = 484 lnk->wr_tx_ibs[i].send_flags =
473 IB_SEND_SIGNALED | IB_SEND_SOLICITED; 485 IB_SEND_SIGNALED | IB_SEND_SOLICITED;
486 lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
487 lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
488 lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
489 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
490 lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
491 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
474 } 492 }
475 for (i = 0; i < lnk->wr_rx_cnt; i++) { 493 for (i = 0; i < lnk->wr_rx_cnt; i++) {
476 lnk->wr_rx_sges[i].addr = 494 lnk->wr_rx_sges[i].addr =
@@ -521,8 +539,12 @@ void smc_wr_free_link_mem(struct smc_link *lnk)
521 lnk->wr_tx_mask = NULL; 539 lnk->wr_tx_mask = NULL;
522 kfree(lnk->wr_tx_sges); 540 kfree(lnk->wr_tx_sges);
523 lnk->wr_tx_sges = NULL; 541 lnk->wr_tx_sges = NULL;
542 kfree(lnk->wr_tx_rdma_sges);
543 lnk->wr_tx_rdma_sges = NULL;
524 kfree(lnk->wr_rx_sges); 544 kfree(lnk->wr_rx_sges);
525 lnk->wr_rx_sges = NULL; 545 lnk->wr_rx_sges = NULL;
546 kfree(lnk->wr_tx_rdmas);
547 lnk->wr_tx_rdmas = NULL;
526 kfree(lnk->wr_rx_ibs); 548 kfree(lnk->wr_rx_ibs);
527 lnk->wr_rx_ibs = NULL; 549 lnk->wr_rx_ibs = NULL;
528 kfree(lnk->wr_tx_ibs); 550 kfree(lnk->wr_tx_ibs);
@@ -552,10 +574,20 @@ int smc_wr_alloc_link_mem(struct smc_link *link)
552 GFP_KERNEL); 574 GFP_KERNEL);
553 if (!link->wr_rx_ibs) 575 if (!link->wr_rx_ibs)
554 goto no_mem_wr_tx_ibs; 576 goto no_mem_wr_tx_ibs;
577 link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
578 sizeof(link->wr_tx_rdmas[0]),
579 GFP_KERNEL);
580 if (!link->wr_tx_rdmas)
581 goto no_mem_wr_rx_ibs;
582 link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
583 sizeof(link->wr_tx_rdma_sges[0]),
584 GFP_KERNEL);
585 if (!link->wr_tx_rdma_sges)
586 goto no_mem_wr_tx_rdmas;
555 link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), 587 link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
556 GFP_KERNEL); 588 GFP_KERNEL);
557 if (!link->wr_tx_sges) 589 if (!link->wr_tx_sges)
558 goto no_mem_wr_rx_ibs; 590 goto no_mem_wr_tx_rdma_sges;
559 link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, 591 link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
560 sizeof(link->wr_rx_sges[0]), 592 sizeof(link->wr_rx_sges[0]),
561 GFP_KERNEL); 593 GFP_KERNEL);
@@ -579,6 +611,10 @@ no_mem_wr_rx_sges:
579 kfree(link->wr_rx_sges); 611 kfree(link->wr_rx_sges);
580no_mem_wr_tx_sges: 612no_mem_wr_tx_sges:
581 kfree(link->wr_tx_sges); 613 kfree(link->wr_tx_sges);
614no_mem_wr_tx_rdma_sges:
615 kfree(link->wr_tx_rdma_sges);
616no_mem_wr_tx_rdmas:
617 kfree(link->wr_tx_rdmas);
582no_mem_wr_rx_ibs: 618no_mem_wr_rx_ibs:
583 kfree(link->wr_rx_ibs); 619 kfree(link->wr_rx_ibs);
584no_mem_wr_tx_ibs: 620no_mem_wr_tx_ibs:
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 1d85bb14fd6f..09bf32fd3959 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -85,6 +85,7 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev);
85 85
86int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, 86int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
87 struct smc_wr_buf **wr_buf, 87 struct smc_wr_buf **wr_buf,
88 struct smc_rdma_wr **wrs,
88 struct smc_wr_tx_pend_priv **wr_pend_priv); 89 struct smc_wr_tx_pend_priv **wr_pend_priv);
89int smc_wr_tx_put_slot(struct smc_link *link, 90int smc_wr_tx_put_slot(struct smc_link *link,
90 struct smc_wr_tx_pend_priv *wr_pend_priv); 91 struct smc_wr_tx_pend_priv *wr_pend_priv);
diff --git a/net/socket.c b/net/socket.c
index e89884e2197b..d80d87a395ea 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
941EXPORT_SYMBOL(dlci_ioctl_set); 941EXPORT_SYMBOL(dlci_ioctl_set);
942 942
943static long sock_do_ioctl(struct net *net, struct socket *sock, 943static long sock_do_ioctl(struct net *net, struct socket *sock,
944 unsigned int cmd, unsigned long arg, 944 unsigned int cmd, unsigned long arg)
945 unsigned int ifreq_size)
946{ 945{
947 int err; 946 int err;
948 void __user *argp = (void __user *)arg; 947 void __user *argp = (void __user *)arg;
@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
968 } else { 967 } else {
969 struct ifreq ifr; 968 struct ifreq ifr;
970 bool need_copyout; 969 bool need_copyout;
971 if (copy_from_user(&ifr, argp, ifreq_size)) 970 if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
972 return -EFAULT; 971 return -EFAULT;
973 err = dev_ioctl(net, cmd, &ifr, &need_copyout); 972 err = dev_ioctl(net, cmd, &ifr, &need_copyout);
974 if (!err && need_copyout) 973 if (!err && need_copyout)
975 if (copy_to_user(argp, &ifr, ifreq_size)) 974 if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
976 return -EFAULT; 975 return -EFAULT;
977 } 976 }
978 return err; 977 return err;
@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1071 err = open_related_ns(&net->ns, get_net_ns); 1070 err = open_related_ns(&net->ns, get_net_ns);
1072 break; 1071 break;
1073 default: 1072 default:
1074 err = sock_do_ioctl(net, sock, cmd, arg, 1073 err = sock_do_ioctl(net, sock, cmd, arg);
1075 sizeof(struct ifreq));
1076 break; 1074 break;
1077 } 1075 }
1078 return err; 1076 return err;
@@ -2780,8 +2778,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
2780 int err; 2778 int err;
2781 2779
2782 set_fs(KERNEL_DS); 2780 set_fs(KERNEL_DS);
2783 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, 2781 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
2784 sizeof(struct compat_ifreq));
2785 set_fs(old_fs); 2782 set_fs(old_fs);
2786 if (!err) 2783 if (!err)
2787 err = compat_put_timeval(&ktv, up); 2784 err = compat_put_timeval(&ktv, up);
@@ -2797,8 +2794,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
2797 int err; 2794 int err;
2798 2795
2799 set_fs(KERNEL_DS); 2796 set_fs(KERNEL_DS);
2800 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, 2797 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
2801 sizeof(struct compat_ifreq));
2802 set_fs(old_fs); 2798 set_fs(old_fs);
2803 if (!err) 2799 if (!err)
2804 err = compat_put_timespec(&kts, up); 2800 err = compat_put_timespec(&kts, up);
@@ -2994,6 +2990,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
2994 return dev_ioctl(net, cmd, &ifreq, NULL); 2990 return dev_ioctl(net, cmd, &ifreq, NULL);
2995} 2991}
2996 2992
2993static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
2994 unsigned int cmd,
2995 struct compat_ifreq __user *uifr32)
2996{
2997 struct ifreq __user *uifr;
2998 int err;
2999
3000 /* Handle the fact that while struct ifreq has the same *layout* on
3001 * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
3002 * which are handled elsewhere, it still has different *size* due to
3003 * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
3004 * resulting in struct ifreq being 32 and 40 bytes respectively).
3005 * As a result, if the struct happens to be at the end of a page and
3006 * the next page isn't readable/writable, we get a fault. To prevent
3007 * that, copy back and forth to the full size.
3008 */
3009
3010 uifr = compat_alloc_user_space(sizeof(*uifr));
3011 if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
3012 return -EFAULT;
3013
3014 err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
3015
3016 if (!err) {
3017 switch (cmd) {
3018 case SIOCGIFFLAGS:
3019 case SIOCGIFMETRIC:
3020 case SIOCGIFMTU:
3021 case SIOCGIFMEM:
3022 case SIOCGIFHWADDR:
3023 case SIOCGIFINDEX:
3024 case SIOCGIFADDR:
3025 case SIOCGIFBRDADDR:
3026 case SIOCGIFDSTADDR:
3027 case SIOCGIFNETMASK:
3028 case SIOCGIFPFLAGS:
3029 case SIOCGIFTXQLEN:
3030 case SIOCGMIIPHY:
3031 case SIOCGMIIREG:
3032 case SIOCGIFNAME:
3033 if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
3034 err = -EFAULT;
3035 break;
3036 }
3037 }
3038 return err;
3039}
3040
2997static int compat_sioc_ifmap(struct net *net, unsigned int cmd, 3041static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
2998 struct compat_ifreq __user *uifr32) 3042 struct compat_ifreq __user *uifr32)
2999{ 3043{
@@ -3109,8 +3153,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
3109 } 3153 }
3110 3154
3111 set_fs(KERNEL_DS); 3155 set_fs(KERNEL_DS);
3112 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, 3156 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
3113 sizeof(struct compat_ifreq));
3114 set_fs(old_fs); 3157 set_fs(old_fs);
3115 3158
3116out: 3159out:
@@ -3210,21 +3253,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
3210 case SIOCSIFTXQLEN: 3253 case SIOCSIFTXQLEN:
3211 case SIOCBRADDIF: 3254 case SIOCBRADDIF:
3212 case SIOCBRDELIF: 3255 case SIOCBRDELIF:
3256 case SIOCGIFNAME:
3213 case SIOCSIFNAME: 3257 case SIOCSIFNAME:
3214 case SIOCGMIIPHY: 3258 case SIOCGMIIPHY:
3215 case SIOCGMIIREG: 3259 case SIOCGMIIREG:
3216 case SIOCSMIIREG: 3260 case SIOCSMIIREG:
3217 case SIOCSARP:
3218 case SIOCGARP:
3219 case SIOCDARP:
3220 case SIOCATMARK:
3221 case SIOCBONDENSLAVE: 3261 case SIOCBONDENSLAVE:
3222 case SIOCBONDRELEASE: 3262 case SIOCBONDRELEASE:
3223 case SIOCBONDSETHWADDR: 3263 case SIOCBONDSETHWADDR:
3224 case SIOCBONDCHANGEACTIVE: 3264 case SIOCBONDCHANGEACTIVE:
3225 case SIOCGIFNAME: 3265 return compat_ifreq_ioctl(net, sock, cmd, argp);
3226 return sock_do_ioctl(net, sock, cmd, arg, 3266
3227 sizeof(struct compat_ifreq)); 3267 case SIOCSARP:
3268 case SIOCGARP:
3269 case SIOCDARP:
3270 case SIOCATMARK:
3271 return sock_do_ioctl(net, sock, cmd, arg);
3228 } 3272 }
3229 3273
3230 return -ENOIOCTLCMD; 3274 return -ENOIOCTLCMD;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 1ff9768f5456..f3023bbc0b7f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -41,6 +41,9 @@ static unsigned long number_cred_unused;
41 41
42static struct cred machine_cred = { 42static struct cred machine_cred = {
43 .usage = ATOMIC_INIT(1), 43 .usage = ATOMIC_INIT(1),
44#ifdef CONFIG_DEBUG_CREDENTIALS
45 .magic = CRED_MAGIC,
46#endif
44}; 47};
45 48
46/* 49/*
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dc86713b32b6..1531b0219344 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1549,8 +1549,10 @@ gss_marshal(struct rpc_task *task, __be32 *p)
1549 cred_len = p++; 1549 cred_len = p++;
1550 1550
1551 spin_lock(&ctx->gc_seq_lock); 1551 spin_lock(&ctx->gc_seq_lock);
1552 req->rq_seqno = ctx->gc_seq++; 1552 req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
1553 spin_unlock(&ctx->gc_seq_lock); 1553 spin_unlock(&ctx->gc_seq_lock);
1554 if (req->rq_seqno == MAXSEQ)
1555 goto out_expired;
1554 1556
1555 *p++ = htonl((u32) RPC_GSS_VERSION); 1557 *p++ = htonl((u32) RPC_GSS_VERSION);
1556 *p++ = htonl((u32) ctx->gc_proc); 1558 *p++ = htonl((u32) ctx->gc_proc);
@@ -1572,14 +1574,18 @@ gss_marshal(struct rpc_task *task, __be32 *p)
1572 mic.data = (u8 *)(p + 1); 1574 mic.data = (u8 *)(p + 1);
1573 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1575 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1574 if (maj_stat == GSS_S_CONTEXT_EXPIRED) { 1576 if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
1575 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1577 goto out_expired;
1576 } else if (maj_stat != 0) { 1578 } else if (maj_stat != 0) {
1577 printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); 1579 pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
1580 task->tk_status = -EIO;
1578 goto out_put_ctx; 1581 goto out_put_ctx;
1579 } 1582 }
1580 p = xdr_encode_opaque(p, NULL, mic.len); 1583 p = xdr_encode_opaque(p, NULL, mic.len);
1581 gss_put_ctx(ctx); 1584 gss_put_ctx(ctx);
1582 return p; 1585 return p;
1586out_expired:
1587 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1588 task->tk_status = -EKEYEXPIRED;
1583out_put_ctx: 1589out_put_ctx:
1584 gss_put_ctx(ctx); 1590 gss_put_ctx(ctx);
1585 return NULL; 1591 return NULL;
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index fb6656295204..507105127095 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
44 unsigned char *cksum, unsigned char *buf) 44 unsigned char *cksum, unsigned char *buf)
45{ 45{
46 struct crypto_sync_skcipher *cipher; 46 struct crypto_sync_skcipher *cipher;
47 unsigned char plain[8]; 47 unsigned char *plain;
48 s32 code; 48 s32 code;
49 49
50 dprintk("RPC: %s:\n", __func__); 50 dprintk("RPC: %s:\n", __func__);
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
52 if (IS_ERR(cipher)) 52 if (IS_ERR(cipher))
53 return PTR_ERR(cipher); 53 return PTR_ERR(cipher);
54 54
55 plain = kmalloc(8, GFP_NOFS);
56 if (!plain)
57 return -ENOMEM;
58
55 plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); 59 plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
56 plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); 60 plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
57 plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); 61 plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
67 71
68 code = krb5_encrypt(cipher, cksum, plain, buf, 8); 72 code = krb5_encrypt(cipher, cksum, plain, buf, 8);
69out: 73out:
74 kfree(plain);
70 crypto_free_sync_skcipher(cipher); 75 crypto_free_sync_skcipher(cipher);
71 return code; 76 return code;
72} 77}
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
77 u32 seqnum, 82 u32 seqnum,
78 unsigned char *cksum, unsigned char *buf) 83 unsigned char *cksum, unsigned char *buf)
79{ 84{
80 unsigned char plain[8]; 85 unsigned char *plain;
86 s32 code;
81 87
82 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) 88 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
83 return krb5_make_rc4_seq_num(kctx, direction, seqnum, 89 return krb5_make_rc4_seq_num(kctx, direction, seqnum,
84 cksum, buf); 90 cksum, buf);
85 91
92 plain = kmalloc(8, GFP_NOFS);
93 if (!plain)
94 return -ENOMEM;
95
86 plain[0] = (unsigned char) (seqnum & 0xff); 96 plain[0] = (unsigned char) (seqnum & 0xff);
87 plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); 97 plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
88 plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); 98 plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
93 plain[6] = direction; 103 plain[6] = direction;
94 plain[7] = direction; 104 plain[7] = direction;
95 105
96 return krb5_encrypt(key, cksum, plain, buf, 8); 106 code = krb5_encrypt(key, cksum, plain, buf, 8);
107 kfree(plain);
108 return code;
97} 109}
98 110
99static s32 111static s32
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
101 unsigned char *buf, int *direction, s32 *seqnum) 113 unsigned char *buf, int *direction, s32 *seqnum)
102{ 114{
103 struct crypto_sync_skcipher *cipher; 115 struct crypto_sync_skcipher *cipher;
104 unsigned char plain[8]; 116 unsigned char *plain;
105 s32 code; 117 s32 code;
106 118
107 dprintk("RPC: %s:\n", __func__); 119 dprintk("RPC: %s:\n", __func__);
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
113 if (code) 125 if (code)
114 goto out; 126 goto out;
115 127
128 plain = kmalloc(8, GFP_NOFS);
129 if (!plain) {
130 code = -ENOMEM;
131 goto out;
132 }
133
116 code = krb5_decrypt(cipher, cksum, buf, plain, 8); 134 code = krb5_decrypt(cipher, cksum, buf, plain, 8);
117 if (code) 135 if (code)
118 goto out; 136 goto out_plain;
119 137
120 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) 138 if ((plain[4] != plain[5]) || (plain[4] != plain[6])
121 || (plain[4] != plain[7])) { 139 || (plain[4] != plain[7])) {
122 code = (s32)KG_BAD_SEQ; 140 code = (s32)KG_BAD_SEQ;
123 goto out; 141 goto out_plain;
124 } 142 }
125 143
126 *direction = plain[4]; 144 *direction = plain[4];
127 145
128 *seqnum = ((plain[0] << 24) | (plain[1] << 16) | 146 *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
129 (plain[2] << 8) | (plain[3])); 147 (plain[2] << 8) | (plain[3]));
148out_plain:
149 kfree(plain);
130out: 150out:
131 crypto_free_sync_skcipher(cipher); 151 crypto_free_sync_skcipher(cipher);
132 return code; 152 return code;
@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
139 int *direction, u32 *seqnum) 159 int *direction, u32 *seqnum)
140{ 160{
141 s32 code; 161 s32 code;
142 unsigned char plain[8]; 162 unsigned char *plain;
143 struct crypto_sync_skcipher *key = kctx->seq; 163 struct crypto_sync_skcipher *key = kctx->seq;
144 164
145 dprintk("RPC: krb5_get_seq_num:\n"); 165 dprintk("RPC: krb5_get_seq_num:\n");
@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
147 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) 167 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
148 return krb5_get_rc4_seq_num(kctx, cksum, buf, 168 return krb5_get_rc4_seq_num(kctx, cksum, buf,
149 direction, seqnum); 169 direction, seqnum);
170 plain = kmalloc(8, GFP_NOFS);
171 if (!plain)
172 return -ENOMEM;
150 173
151 if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) 174 if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
152 return code; 175 goto out;
153 176
154 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || 177 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
155 (plain[4] != plain[7])) 178 (plain[4] != plain[7])) {
156 return (s32)KG_BAD_SEQ; 179 code = (s32)KG_BAD_SEQ;
180 goto out;
181 }
157 182
158 *direction = plain[4]; 183 *direction = plain[4];
159 184
160 *seqnum = ((plain[0]) | 185 *seqnum = ((plain[0]) |
161 (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); 186 (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
162 187
163 return 0; 188out:
189 kfree(plain);
190 return code;
164} 191}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 71d9599b5816..d7ec6132c046 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1739,14 +1739,10 @@ rpc_xdr_encode(struct rpc_task *task)
1739 xdr_buf_init(&req->rq_rcv_buf, 1739 xdr_buf_init(&req->rq_rcv_buf,
1740 req->rq_rbuffer, 1740 req->rq_rbuffer,
1741 req->rq_rcvsize); 1741 req->rq_rcvsize);
1742 req->rq_bytes_sent = 0;
1743 1742
1744 p = rpc_encode_header(task); 1743 p = rpc_encode_header(task);
1745 if (p == NULL) { 1744 if (p == NULL)
1746 printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1747 rpc_exit(task, -EIO);
1748 return; 1745 return;
1749 }
1750 1746
1751 encode = task->tk_msg.rpc_proc->p_encode; 1747 encode = task->tk_msg.rpc_proc->p_encode;
1752 if (encode == NULL) 1748 if (encode == NULL)
@@ -1771,10 +1767,17 @@ call_encode(struct rpc_task *task)
1771 /* Did the encode result in an error condition? */ 1767 /* Did the encode result in an error condition? */
1772 if (task->tk_status != 0) { 1768 if (task->tk_status != 0) {
1773 /* Was the error nonfatal? */ 1769 /* Was the error nonfatal? */
1774 if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM) 1770 switch (task->tk_status) {
1771 case -EAGAIN:
1772 case -ENOMEM:
1775 rpc_delay(task, HZ >> 4); 1773 rpc_delay(task, HZ >> 4);
1776 else 1774 break;
1775 case -EKEYEXPIRED:
1776 task->tk_action = call_refresh;
1777 break;
1778 default:
1777 rpc_exit(task, task->tk_status); 1779 rpc_exit(task, task->tk_status);
1780 }
1778 return; 1781 return;
1779 } 1782 }
1780 1783
@@ -2336,7 +2339,8 @@ rpc_encode_header(struct rpc_task *task)
2336 *p++ = htonl(clnt->cl_vers); /* program version */ 2339 *p++ = htonl(clnt->cl_vers); /* program version */
2337 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ 2340 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
2338 p = rpcauth_marshcred(task, p); 2341 p = rpcauth_marshcred(task, p);
2339 req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); 2342 if (p)
2343 req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
2340 return p; 2344 return p;
2341} 2345}
2342 2346
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
index 45a033329cd4..19bb356230ed 100644
--- a/net/sunrpc/debugfs.c
+++ b/net/sunrpc/debugfs.c
@@ -146,7 +146,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
146 rcu_read_lock(); 146 rcu_read_lock();
147 xprt = rcu_dereference(clnt->cl_xprt); 147 xprt = rcu_dereference(clnt->cl_xprt);
148 /* no "debugfs" dentry? Don't bother with the symlink. */ 148 /* no "debugfs" dentry? Don't bother with the symlink. */
149 if (!xprt->debugfs) { 149 if (IS_ERR_OR_NULL(xprt->debugfs)) {
150 rcu_read_unlock(); 150 rcu_read_unlock();
151 return; 151 return;
152 } 152 }
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 73547d17d3c6..f1ec2110efeb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
1151 struct rpc_xprt *xprt = req->rq_xprt; 1151 struct rpc_xprt *xprt = req->rq_xprt;
1152 1152
1153 if (xprt_request_need_enqueue_transmit(task, req)) { 1153 if (xprt_request_need_enqueue_transmit(task, req)) {
1154 req->rq_bytes_sent = 0;
1154 spin_lock(&xprt->queue_lock); 1155 spin_lock(&xprt->queue_lock);
1155 /* 1156 /*
1156 * Requests that carry congestion control credits are added 1157 * Requests that carry congestion control credits are added
@@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
1177 INIT_LIST_HEAD(&req->rq_xmit2); 1178 INIT_LIST_HEAD(&req->rq_xmit2);
1178 goto out; 1179 goto out;
1179 } 1180 }
1180 } else { 1181 } else if (!req->rq_seqno) {
1181 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { 1182 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1182 if (pos->rq_task->tk_owner != task->tk_owner) 1183 if (pos->rq_task->tk_owner != task->tk_owner)
1183 continue; 1184 continue;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index cf51b8f9b15f..1f200119268c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
537 DMA_TO_DEVICE); 537 DMA_TO_DEVICE);
538} 538}
539 539
540/* If the xdr_buf has more elements than the device can
541 * transmit in a single RDMA Send, then the reply will
542 * have to be copied into a bounce buffer.
543 */
544static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
545 struct xdr_buf *xdr,
546 __be32 *wr_lst)
547{
548 int elements;
549
550 /* xdr->head */
551 elements = 1;
552
553 /* xdr->pages */
554 if (!wr_lst) {
555 unsigned int remaining;
556 unsigned long pageoff;
557
558 pageoff = xdr->page_base & ~PAGE_MASK;
559 remaining = xdr->page_len;
560 while (remaining) {
561 ++elements;
562 remaining -= min_t(u32, PAGE_SIZE - pageoff,
563 remaining);
564 pageoff = 0;
565 }
566 }
567
568 /* xdr->tail */
569 if (xdr->tail[0].iov_len)
570 ++elements;
571
572 /* assume 1 SGE is needed for the transport header */
573 return elements >= rdma->sc_max_send_sges;
574}
575
576/* The device is not capable of sending the reply directly.
577 * Assemble the elements of @xdr into the transport header
578 * buffer.
579 */
580static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
581 struct svc_rdma_send_ctxt *ctxt,
582 struct xdr_buf *xdr, __be32 *wr_lst)
583{
584 unsigned char *dst, *tailbase;
585 unsigned int taillen;
586
587 dst = ctxt->sc_xprt_buf;
588 dst += ctxt->sc_sges[0].length;
589
590 memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
591 dst += xdr->head[0].iov_len;
592
593 tailbase = xdr->tail[0].iov_base;
594 taillen = xdr->tail[0].iov_len;
595 if (wr_lst) {
596 u32 xdrpad;
597
598 xdrpad = xdr_padsize(xdr->page_len);
599 if (taillen && xdrpad) {
600 tailbase += xdrpad;
601 taillen -= xdrpad;
602 }
603 } else {
604 unsigned int len, remaining;
605 unsigned long pageoff;
606 struct page **ppages;
607
608 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
609 pageoff = xdr->page_base & ~PAGE_MASK;
610 remaining = xdr->page_len;
611 while (remaining) {
612 len = min_t(u32, PAGE_SIZE - pageoff, remaining);
613
614 memcpy(dst, page_address(*ppages), len);
615 remaining -= len;
616 dst += len;
617 pageoff = 0;
618 }
619 }
620
621 if (taillen)
622 memcpy(dst, tailbase, taillen);
623
624 ctxt->sc_sges[0].length += xdr->len;
625 ib_dma_sync_single_for_device(rdma->sc_pd->device,
626 ctxt->sc_sges[0].addr,
627 ctxt->sc_sges[0].length,
628 DMA_TO_DEVICE);
629
630 return 0;
631}
632
540/* svc_rdma_map_reply_msg - Map the buffer holding RPC message 633/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
541 * @rdma: controlling transport 634 * @rdma: controlling transport
542 * @ctxt: send_ctxt for the Send WR 635 * @ctxt: send_ctxt for the Send WR
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
559 u32 xdr_pad; 652 u32 xdr_pad;
560 int ret; 653 int ret;
561 654
562 if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 655 if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
563 return -EIO; 656 return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
657
658 ++ctxt->sc_cur_sge_no;
564 ret = svc_rdma_dma_map_buf(rdma, ctxt, 659 ret = svc_rdma_dma_map_buf(rdma, ctxt,
565 xdr->head[0].iov_base, 660 xdr->head[0].iov_base,
566 xdr->head[0].iov_len); 661 xdr->head[0].iov_len);
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
591 while (remaining) { 686 while (remaining) {
592 len = min_t(u32, PAGE_SIZE - page_off, remaining); 687 len = min_t(u32, PAGE_SIZE - page_off, remaining);
593 688
594 if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 689 ++ctxt->sc_cur_sge_no;
595 return -EIO;
596 ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, 690 ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
597 page_off, len); 691 page_off, len);
598 if (ret < 0) 692 if (ret < 0)
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
606 len = xdr->tail[0].iov_len; 700 len = xdr->tail[0].iov_len;
607tail: 701tail:
608 if (len) { 702 if (len) {
609 if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 703 ++ctxt->sc_cur_sge_no;
610 return -EIO;
611 ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); 704 ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
612 if (ret < 0) 705 if (ret < 0)
613 return ret; 706 return ret;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 924c17d46903..57f86c63a463 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
419 /* Transport header, head iovec, tail iovec */ 419 /* Transport header, head iovec, tail iovec */
420 newxprt->sc_max_send_sges = 3; 420 newxprt->sc_max_send_sges = 3;
421 /* Add one SGE per page list entry */ 421 /* Add one SGE per page list entry */
422 newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE; 422 newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
423 if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) { 423 if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
424 pr_err("svcrdma: too few Send SGEs available (%d needed)\n", 424 newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
425 newxprt->sc_max_send_sges);
426 goto errout;
427 }
428 newxprt->sc_max_req_size = svcrdma_max_req_size; 425 newxprt->sc_max_req_size = svcrdma_max_req_size;
429 newxprt->sc_max_requests = svcrdma_max_requests; 426 newxprt->sc_max_requests = svcrdma_max_requests;
430 newxprt->sc_max_bc_requests = svcrdma_max_bc_requests; 427 newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 7749a2bf6887..21113bfd4eca 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -527,7 +527,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
527 527
528 sendcq = ib_alloc_cq(ia->ri_device, NULL, 528 sendcq = ib_alloc_cq(ia->ri_device, NULL,
529 ep->rep_attr.cap.max_send_wr + 1, 529 ep->rep_attr.cap.max_send_wr + 1,
530 1, IB_POLL_WORKQUEUE); 530 ia->ri_device->num_comp_vectors > 1 ? 1 : 0,
531 IB_POLL_WORKQUEUE);
531 if (IS_ERR(sendcq)) { 532 if (IS_ERR(sendcq)) {
532 rc = PTR_ERR(sendcq); 533 rc = PTR_ERR(sendcq);
533 goto out1; 534 goto out1;
@@ -845,17 +846,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
845 for (i = 0; i <= buf->rb_sc_last; i++) { 846 for (i = 0; i <= buf->rb_sc_last; i++) {
846 sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); 847 sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
847 if (!sc) 848 if (!sc)
848 goto out_destroy; 849 return -ENOMEM;
849 850
850 sc->sc_xprt = r_xprt; 851 sc->sc_xprt = r_xprt;
851 buf->rb_sc_ctxs[i] = sc; 852 buf->rb_sc_ctxs[i] = sc;
852 } 853 }
853 854
854 return 0; 855 return 0;
855
856out_destroy:
857 rpcrdma_sendctxs_destroy(buf);
858 return -ENOMEM;
859} 856}
860 857
861/* The sendctx queue is not guaranteed to have a size that is a 858/* The sendctx queue is not guaranteed to have a size that is a
@@ -1113,8 +1110,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
1113 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1110 WQ_MEM_RECLAIM | WQ_HIGHPRI,
1114 0, 1111 0,
1115 r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); 1112 r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
1116 if (!buf->rb_completion_wq) 1113 if (!buf->rb_completion_wq) {
1114 rc = -ENOMEM;
1117 goto out; 1115 goto out;
1116 }
1118 1117
1119 return 0; 1118 return 0;
1120out: 1119out:
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 13559e6a460b..7754aa3e434f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -48,6 +48,7 @@
48#include <net/udp.h> 48#include <net/udp.h>
49#include <net/tcp.h> 49#include <net/tcp.h>
50#include <linux/bvec.h> 50#include <linux/bvec.h>
51#include <linux/highmem.h>
51#include <linux/uio.h> 52#include <linux/uio.h>
52 53
53#include <trace/events/sunrpc.h> 54#include <trace/events/sunrpc.h>
@@ -376,6 +377,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
376 return sock_recvmsg(sock, msg, flags); 377 return sock_recvmsg(sock, msg, flags);
377} 378}
378 379
380#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
381static void
382xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
383{
384 struct bvec_iter bi = {
385 .bi_size = count,
386 };
387 struct bio_vec bv;
388
389 bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
390 for_each_bvec(bv, bvec, bi, bi)
391 flush_dcache_page(bv.bv_page);
392}
393#else
394static inline void
395xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
396{
397}
398#endif
399
379static ssize_t 400static ssize_t
380xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, 401xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
381 struct xdr_buf *buf, size_t count, size_t seek, size_t *read) 402 struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
@@ -409,6 +430,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
409 seek + buf->page_base); 430 seek + buf->page_base);
410 if (ret <= 0) 431 if (ret <= 0)
411 goto sock_err; 432 goto sock_err;
433 xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
412 offset += ret - buf->page_base; 434 offset += ret - buf->page_base;
413 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) 435 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
414 goto out; 436 goto out;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 2792a3cae682..85ad5c0678d0 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1145,7 +1145,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1145 default: 1145 default:
1146 pr_warn("Dropping received illegal msg type\n"); 1146 pr_warn("Dropping received illegal msg type\n");
1147 kfree_skb(skb); 1147 kfree_skb(skb);
1148 return false; 1148 return true;
1149 }; 1149 };
1150} 1150}
1151 1151
@@ -1425,6 +1425,10 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1425 l->rcv_unacked = 0; 1425 l->rcv_unacked = 0;
1426 } else { 1426 } else {
1427 /* RESET_MSG or ACTIVATE_MSG */ 1427 /* RESET_MSG or ACTIVATE_MSG */
1428 if (mtyp == ACTIVATE_MSG) {
1429 msg_set_dest_session_valid(hdr, 1);
1430 msg_set_dest_session(hdr, l->peer_session);
1431 }
1428 msg_set_max_pkt(hdr, l->advertised_mtu); 1432 msg_set_max_pkt(hdr, l->advertised_mtu);
1429 strcpy(data, l->if_name); 1433 strcpy(data, l->if_name);
1430 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); 1434 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
@@ -1642,6 +1646,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1642 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1646 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1643 break; 1647 break;
1644 } 1648 }
1649
1650 /* If this endpoint was re-created while peer was ESTABLISHING
1651 * it doesn't know current session number. Force re-synch.
1652 */
1653 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
1654 l->session != msg_dest_session(hdr)) {
1655 if (less(l->session, msg_dest_session(hdr)))
1656 l->session = msg_dest_session(hdr) + 1;
1657 break;
1658 }
1659
1645 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ 1660 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1646 if (mtyp == RESET_MSG || !link_is_up(l)) 1661 if (mtyp == RESET_MSG || !link_is_up(l))
1647 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); 1662 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a0924956bb61..d7e4b8b93f9d 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -360,6 +360,28 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
360 msg_set_bits(m, 1, 0, 0xffff, n); 360 msg_set_bits(m, 1, 0, 0xffff, n);
361} 361}
362 362
363/* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch
364 * link peer session number
365 */
366static inline bool msg_dest_session_valid(struct tipc_msg *m)
367{
368 return msg_bits(m, 1, 16, 0x1);
369}
370
371static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid)
372{
373 msg_set_bits(m, 1, 16, 0x1, valid);
374}
375
376static inline u16 msg_dest_session(struct tipc_msg *m)
377{
378 return msg_bits(m, 1, 0, 0xffff);
379}
380
381static inline void msg_set_dest_session(struct tipc_msg *m, u16 n)
382{
383 msg_set_bits(m, 1, 0, 0xffff, n);
384}
363 385
364/* 386/*
365 * Word 2 387 * Word 2
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 40f5cae623a7..4ad3586da8f0 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
87 return limit; 87 return limit;
88} 88}
89 89
90static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
91{
92 return TLV_GET_LEN(tlv) - TLV_SPACE(0);
93}
94
90static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len) 95static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
91{ 96{
92 struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb); 97 struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
166 return buf; 171 return buf;
167} 172}
168 173
174static inline bool string_is_valid(char *s, int len)
175{
176 return memchr(s, '\0', len) ? true : false;
177}
178
169static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, 179static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
170 struct tipc_nl_compat_msg *msg, 180 struct tipc_nl_compat_msg *msg,
171 struct sk_buff *arg) 181 struct sk_buff *arg)
@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
379 struct nlattr *prop; 389 struct nlattr *prop;
380 struct nlattr *bearer; 390 struct nlattr *bearer;
381 struct tipc_bearer_config *b; 391 struct tipc_bearer_config *b;
392 int len;
382 393
383 b = (struct tipc_bearer_config *)TLV_DATA(msg->req); 394 b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
384 395
@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
386 if (!bearer) 397 if (!bearer)
387 return -EMSGSIZE; 398 return -EMSGSIZE;
388 399
400 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
401 if (!string_is_valid(b->name, len))
402 return -EINVAL;
403
389 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name)) 404 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
390 return -EMSGSIZE; 405 return -EMSGSIZE;
391 406
@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
411{ 426{
412 char *name; 427 char *name;
413 struct nlattr *bearer; 428 struct nlattr *bearer;
429 int len;
414 430
415 name = (char *)TLV_DATA(msg->req); 431 name = (char *)TLV_DATA(msg->req);
416 432
@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
418 if (!bearer) 434 if (!bearer)
419 return -EMSGSIZE; 435 return -EMSGSIZE;
420 436
437 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
438 if (!string_is_valid(name, len))
439 return -EINVAL;
440
421 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name)) 441 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
422 return -EMSGSIZE; 442 return -EMSGSIZE;
423 443
@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
478 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; 498 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
479 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; 499 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
480 int err; 500 int err;
501 int len;
481 502
482 if (!attrs[TIPC_NLA_LINK]) 503 if (!attrs[TIPC_NLA_LINK])
483 return -EINVAL; 504 return -EINVAL;
@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
504 return err; 525 return err;
505 526
506 name = (char *)TLV_DATA(msg->req); 527 name = (char *)TLV_DATA(msg->req);
528
529 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
530 if (!string_is_valid(name, len))
531 return -EINVAL;
532
507 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) 533 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
508 return 0; 534 return 0;
509 535
@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
644 struct nlattr *prop; 670 struct nlattr *prop;
645 struct nlattr *media; 671 struct nlattr *media;
646 struct tipc_link_config *lc; 672 struct tipc_link_config *lc;
673 int len;
647 674
648 lc = (struct tipc_link_config *)TLV_DATA(msg->req); 675 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
649 676
@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
651 if (!media) 678 if (!media)
652 return -EMSGSIZE; 679 return -EMSGSIZE;
653 680
681 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
682 if (!string_is_valid(lc->name, len))
683 return -EINVAL;
684
654 if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name)) 685 if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
655 return -EMSGSIZE; 686 return -EMSGSIZE;
656 687
@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
671 struct nlattr *prop; 702 struct nlattr *prop;
672 struct nlattr *bearer; 703 struct nlattr *bearer;
673 struct tipc_link_config *lc; 704 struct tipc_link_config *lc;
705 int len;
674 706
675 lc = (struct tipc_link_config *)TLV_DATA(msg->req); 707 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
676 708
@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
678 if (!bearer) 710 if (!bearer)
679 return -EMSGSIZE; 711 return -EMSGSIZE;
680 712
713 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
714 if (!string_is_valid(lc->name, len))
715 return -EINVAL;
716
681 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name)) 717 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
682 return -EMSGSIZE; 718 return -EMSGSIZE;
683 719
@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
726 struct tipc_link_config *lc; 762 struct tipc_link_config *lc;
727 struct tipc_bearer *bearer; 763 struct tipc_bearer *bearer;
728 struct tipc_media *media; 764 struct tipc_media *media;
765 int len;
729 766
730 lc = (struct tipc_link_config *)TLV_DATA(msg->req); 767 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
731 768
769 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
770 if (!string_is_valid(lc->name, len))
771 return -EINVAL;
772
732 media = tipc_media_find(lc->name); 773 media = tipc_media_find(lc->name);
733 if (media) { 774 if (media) {
734 cmd->doit = &__tipc_nl_media_set; 775 cmd->doit = &__tipc_nl_media_set;
@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
750{ 791{
751 char *name; 792 char *name;
752 struct nlattr *link; 793 struct nlattr *link;
794 int len;
753 795
754 name = (char *)TLV_DATA(msg->req); 796 name = (char *)TLV_DATA(msg->req);
755 797
@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
757 if (!link) 799 if (!link)
758 return -EMSGSIZE; 800 return -EMSGSIZE;
759 801
802 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
803 if (!string_is_valid(name, len))
804 return -EINVAL;
805
760 if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name)) 806 if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
761 return -EMSGSIZE; 807 return -EMSGSIZE;
762 808
@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
778 }; 824 };
779 825
780 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); 826 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
827 if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
828 return -EINVAL;
781 829
782 depth = ntohl(ntq->depth); 830 depth = ntohl(ntq->depth);
783 831
@@ -904,8 +952,10 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
904 952
905 hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI, 953 hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
906 TIPC_NL_PUBL_GET); 954 TIPC_NL_PUBL_GET);
907 if (!hdr) 955 if (!hdr) {
956 kfree_skb(args);
908 return -EMSGSIZE; 957 return -EMSGSIZE;
958 }
909 959
910 nest = nla_nest_start(args, TIPC_NLA_SOCK); 960 nest = nla_nest_start(args, TIPC_NLA_SOCK);
911 if (!nest) { 961 if (!nest) {
@@ -1206,7 +1256,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
1206 } 1256 }
1207 1257
1208 len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); 1258 len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1209 if (len && !TLV_OK(msg.req, len)) { 1259 if (!len || !TLV_OK(msg.req, len)) {
1210 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); 1260 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
1211 err = -EOPNOTSUPP; 1261 err = -EOPNOTSUPP;
1212 goto send; 1262 goto send;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index db2a6c3e0be9..2dc4919ab23c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -830,15 +830,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
830 tipc_node_write_lock(n); 830 tipc_node_write_lock(n);
831 if (!tipc_link_is_establishing(l)) { 831 if (!tipc_link_is_establishing(l)) {
832 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); 832 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
833 if (delete) {
834 kfree(l);
835 le->link = NULL;
836 n->link_cnt--;
837 }
838 } else { 833 } else {
839 /* Defuse pending tipc_node_link_up() */ 834 /* Defuse pending tipc_node_link_up() */
835 tipc_link_reset(l);
840 tipc_link_fsm_evt(l, LINK_RESET_EVT); 836 tipc_link_fsm_evt(l, LINK_RESET_EVT);
841 } 837 }
838 if (delete) {
839 kfree(l);
840 le->link = NULL;
841 n->link_cnt--;
842 }
842 trace_tipc_node_link_down(n, true, "node link down or deleted!"); 843 trace_tipc_node_link_down(n, true, "node link down or deleted!");
843 tipc_node_write_unlock(n); 844 tipc_node_write_unlock(n);
844 if (delete) 845 if (delete)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1217c90a363b..684f2125fc6b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -388,7 +388,7 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
388 rc_ = tipc_sk_sock_err((sock_), timeo_); \ 388 rc_ = tipc_sk_sock_err((sock_), timeo_); \
389 if (rc_) \ 389 if (rc_) \
390 break; \ 390 break; \
391 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ 391 add_wait_queue(sk_sleep(sk_), &wait_); \
392 release_sock(sk_); \ 392 release_sock(sk_); \
393 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ 393 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
394 sched_annotate_sleep(); \ 394 sched_annotate_sleep(); \
@@ -1677,7 +1677,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
1677static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1677static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1678{ 1678{
1679 struct sock *sk = sock->sk; 1679 struct sock *sk = sock->sk;
1680 DEFINE_WAIT(wait); 1680 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1681 long timeo = *timeop; 1681 long timeo = *timeop;
1682 int err = sock_error(sk); 1682 int err = sock_error(sk);
1683 1683
@@ -1685,15 +1685,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1685 return err; 1685 return err;
1686 1686
1687 for (;;) { 1687 for (;;) {
1688 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1689 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 1688 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1690 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1689 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1691 err = -ENOTCONN; 1690 err = -ENOTCONN;
1692 break; 1691 break;
1693 } 1692 }
1693 add_wait_queue(sk_sleep(sk), &wait);
1694 release_sock(sk); 1694 release_sock(sk);
1695 timeo = schedule_timeout(timeo); 1695 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1696 sched_annotate_sleep();
1696 lock_sock(sk); 1697 lock_sock(sk);
1698 remove_wait_queue(sk_sleep(sk), &wait);
1697 } 1699 }
1698 err = 0; 1700 err = 0;
1699 if (!skb_queue_empty(&sk->sk_receive_queue)) 1701 if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -1709,7 +1711,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1709 if (err) 1711 if (err)
1710 break; 1712 break;
1711 } 1713 }
1712 finish_wait(sk_sleep(sk), &wait);
1713 *timeop = timeo; 1714 *timeop = timeo;
1714 return err; 1715 return err;
1715} 1716}
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index efb16f69bd2c..a457c0fbbef1 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
398 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); 398 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
399 if (ret == -EWOULDBLOCK) 399 if (ret == -EWOULDBLOCK)
400 return -EWOULDBLOCK; 400 return -EWOULDBLOCK;
401 if (ret > 0) { 401 if (ret == sizeof(s)) {
402 read_lock_bh(&sk->sk_callback_lock); 402 read_lock_bh(&sk->sk_callback_lock);
403 ret = tipc_conn_rcv_sub(srv, con, &s); 403 ret = tipc_conn_rcv_sub(srv, con, &s);
404 read_unlock_bh(&sk->sk_callback_lock); 404 read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 11cdc8f7db63..bf5b54b513bc 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -439,6 +439,8 @@ static int tls_do_encryption(struct sock *sk,
439 struct scatterlist *sge = sk_msg_elem(msg_en, start); 439 struct scatterlist *sge = sk_msg_elem(msg_en, start);
440 int rc; 440 int rc;
441 441
442 memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
443
442 sge->offset += tls_ctx->tx.prepend_size; 444 sge->offset += tls_ctx->tx.prepend_size;
443 sge->length -= tls_ctx->tx.prepend_size; 445 sge->length -= tls_ctx->tx.prepend_size;
444 446
@@ -448,7 +450,7 @@ static int tls_do_encryption(struct sock *sk,
448 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); 450 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
449 aead_request_set_crypt(aead_req, rec->sg_aead_in, 451 aead_request_set_crypt(aead_req, rec->sg_aead_in,
450 rec->sg_aead_out, 452 rec->sg_aead_out,
451 data_len, tls_ctx->tx.iv); 453 data_len, rec->iv_data);
452 454
453 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, 455 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
454 tls_encrypt_done, sk); 456 tls_encrypt_done, sk);
@@ -1792,7 +1794,9 @@ void tls_sw_free_resources_tx(struct sock *sk)
1792 if (atomic_read(&ctx->encrypt_pending)) 1794 if (atomic_read(&ctx->encrypt_pending))
1793 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1795 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1794 1796
1797 release_sock(sk);
1795 cancel_delayed_work_sync(&ctx->tx_work.work); 1798 cancel_delayed_work_sync(&ctx->tx_work.work);
1799 lock_sock(sk);
1796 1800
1797 /* Tx whatever records we can transmit and abandon the rest */ 1801 /* Tx whatever records we can transmit and abandon the rest */
1798 tls_tx_records(sk, -1); 1802 tls_tx_records(sk, -1);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 74d1eed7cbd4..a95d479caeea 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -890,7 +890,7 @@ retry:
890 addr->hash ^= sk->sk_type; 890 addr->hash ^= sk->sk_type;
891 891
892 __unix_remove_socket(sk); 892 __unix_remove_socket(sk);
893 u->addr = addr; 893 smp_store_release(&u->addr, addr);
894 __unix_insert_socket(&unix_socket_table[addr->hash], sk); 894 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
895 spin_unlock(&unix_table_lock); 895 spin_unlock(&unix_table_lock);
896 err = 0; 896 err = 0;
@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1060 1060
1061 err = 0; 1061 err = 0;
1062 __unix_remove_socket(sk); 1062 __unix_remove_socket(sk);
1063 u->addr = addr; 1063 smp_store_release(&u->addr, addr);
1064 __unix_insert_socket(list, sk); 1064 __unix_insert_socket(list, sk);
1065 1065
1066out_unlock: 1066out_unlock:
@@ -1331,15 +1331,29 @@ restart:
1331 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); 1331 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1332 otheru = unix_sk(other); 1332 otheru = unix_sk(other);
1333 1333
1334 /* copy address information from listening to new sock*/ 1334 /* copy address information from listening to new sock
1335 if (otheru->addr) { 1335 *
1336 refcount_inc(&otheru->addr->refcnt); 1336 * The contents of *(otheru->addr) and otheru->path
1337 newu->addr = otheru->addr; 1337 * are seen fully set up here, since we have found
1338 } 1338 * otheru in hash under unix_table_lock. Insertion
1339 * into the hash chain we'd found it in had been done
1340 * in an earlier critical area protected by unix_table_lock,
1341 * the same one where we'd set *(otheru->addr) contents,
1342 * as well as otheru->path and otheru->addr itself.
1343 *
1344 * Using smp_store_release() here to set newu->addr
1345 * is enough to make those stores, as well as stores
1346 * to newu->path visible to anyone who gets newu->addr
1347 * by smp_load_acquire(). IOW, the same warranties
1348 * as for unix_sock instances bound in unix_bind() or
1349 * in unix_autobind().
1350 */
1339 if (otheru->path.dentry) { 1351 if (otheru->path.dentry) {
1340 path_get(&otheru->path); 1352 path_get(&otheru->path);
1341 newu->path = otheru->path; 1353 newu->path = otheru->path;
1342 } 1354 }
1355 refcount_inc(&otheru->addr->refcnt);
1356 smp_store_release(&newu->addr, otheru->addr);
1343 1357
1344 /* Set credentials */ 1358 /* Set credentials */
1345 copy_peercred(sk, other); 1359 copy_peercred(sk, other);
@@ -1453,7 +1467,7 @@ out:
1453static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) 1467static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1454{ 1468{
1455 struct sock *sk = sock->sk; 1469 struct sock *sk = sock->sk;
1456 struct unix_sock *u; 1470 struct unix_address *addr;
1457 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); 1471 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1458 int err = 0; 1472 int err = 0;
1459 1473
@@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1468 sock_hold(sk); 1482 sock_hold(sk);
1469 } 1483 }
1470 1484
1471 u = unix_sk(sk); 1485 addr = smp_load_acquire(&unix_sk(sk)->addr);
1472 unix_state_lock(sk); 1486 if (!addr) {
1473 if (!u->addr) {
1474 sunaddr->sun_family = AF_UNIX; 1487 sunaddr->sun_family = AF_UNIX;
1475 sunaddr->sun_path[0] = 0; 1488 sunaddr->sun_path[0] = 0;
1476 err = sizeof(short); 1489 err = sizeof(short);
1477 } else { 1490 } else {
1478 struct unix_address *addr = u->addr;
1479
1480 err = addr->len; 1491 err = addr->len;
1481 memcpy(sunaddr, addr->name, addr->len); 1492 memcpy(sunaddr, addr->name, addr->len);
1482 } 1493 }
1483 unix_state_unlock(sk);
1484 sock_put(sk); 1494 sock_put(sk);
1485out: 1495out:
1486 return err; 1496 return err;
@@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2073 2083
2074static void unix_copy_addr(struct msghdr *msg, struct sock *sk) 2084static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2075{ 2085{
2076 struct unix_sock *u = unix_sk(sk); 2086 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2077 2087
2078 if (u->addr) { 2088 if (addr) {
2079 msg->msg_namelen = u->addr->len; 2089 msg->msg_namelen = addr->len;
2080 memcpy(msg->msg_name, u->addr->name, u->addr->len); 2090 memcpy(msg->msg_name, addr->name, addr->len);
2081 } 2091 }
2082} 2092}
2083 2093
@@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk)
2581 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 2591 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2582 return -EPERM; 2592 return -EPERM;
2583 2593
2584 unix_state_lock(sk); 2594 if (!smp_load_acquire(&unix_sk(sk)->addr))
2595 return -ENOENT;
2596
2585 path = unix_sk(sk)->path; 2597 path = unix_sk(sk)->path;
2586 if (!path.dentry) { 2598 if (!path.dentry)
2587 unix_state_unlock(sk);
2588 return -ENOENT; 2599 return -ENOENT;
2589 }
2590 2600
2591 path_get(&path); 2601 path_get(&path);
2592 unix_state_unlock(sk);
2593 2602
2594 fd = get_unused_fd_flags(O_CLOEXEC); 2603 fd = get_unused_fd_flags(O_CLOEXEC);
2595 if (fd < 0) 2604 if (fd < 0)
@@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
2830 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING), 2839 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2831 sock_i_ino(s)); 2840 sock_i_ino(s));
2832 2841
2833 if (u->addr) { 2842 if (u->addr) { // under unix_table_lock here
2834 int i, len; 2843 int i, len;
2835 seq_putc(seq, ' '); 2844 seq_putc(seq, ' ');
2836 2845
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e83462..3183d9b8ab33 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
10 10
11static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) 11static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
12{ 12{
13 struct unix_address *addr = unix_sk(sk)->addr; 13 /* might or might not have unix_table_lock */
14 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
14 15
15 if (!addr) 16 if (!addr)
16 return 0; 17 return 0;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 5d3cce9e8744..15eb5d3d4750 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
75{ 75{
76 struct virtio_vsock *vsock = virtio_vsock_get(); 76 struct virtio_vsock *vsock = virtio_vsock_get();
77 77
78 if (!vsock)
79 return VMADDR_CID_ANY;
80
78 return vsock->guest_cid; 81 return vsock->guest_cid;
79} 82}
80 83
@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
584 587
585 virtio_vsock_update_guest_cid(vsock); 588 virtio_vsock_update_guest_cid(vsock);
586 589
587 ret = vsock_core_init(&virtio_transport.transport);
588 if (ret < 0)
589 goto out_vqs;
590
591 vsock->rx_buf_nr = 0; 590 vsock->rx_buf_nr = 0;
592 vsock->rx_buf_max_nr = 0; 591 vsock->rx_buf_max_nr = 0;
593 atomic_set(&vsock->queued_replies, 0); 592 atomic_set(&vsock->queued_replies, 0);
@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
618 mutex_unlock(&the_virtio_vsock_mutex); 617 mutex_unlock(&the_virtio_vsock_mutex);
619 return 0; 618 return 0;
620 619
621out_vqs:
622 vsock->vdev->config->del_vqs(vsock->vdev);
623out: 620out:
624 kfree(vsock); 621 kfree(vsock);
625 mutex_unlock(&the_virtio_vsock_mutex); 622 mutex_unlock(&the_virtio_vsock_mutex);
@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
637 flush_work(&vsock->event_work); 634 flush_work(&vsock->event_work);
638 flush_work(&vsock->send_pkt_work); 635 flush_work(&vsock->send_pkt_work);
639 636
637 /* Reset all connected sockets when the device disappear */
638 vsock_for_each_connected_socket(virtio_vsock_reset_sock);
639
640 vdev->config->reset(vdev); 640 vdev->config->reset(vdev);
641 641
642 mutex_lock(&vsock->rx_lock); 642 mutex_lock(&vsock->rx_lock);
@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
669 669
670 mutex_lock(&the_virtio_vsock_mutex); 670 mutex_lock(&the_virtio_vsock_mutex);
671 the_virtio_vsock = NULL; 671 the_virtio_vsock = NULL;
672 vsock_core_exit();
673 mutex_unlock(&the_virtio_vsock_mutex); 672 mutex_unlock(&the_virtio_vsock_mutex);
674 673
675 vdev->config->del_vqs(vdev); 674 vdev->config->del_vqs(vdev);
@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
702 virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); 701 virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
703 if (!virtio_vsock_workqueue) 702 if (!virtio_vsock_workqueue)
704 return -ENOMEM; 703 return -ENOMEM;
704
705 ret = register_virtio_driver(&virtio_vsock_driver); 705 ret = register_virtio_driver(&virtio_vsock_driver);
706 if (ret) 706 if (ret)
707 destroy_workqueue(virtio_vsock_workqueue); 707 goto out_wq;
708
709 ret = vsock_core_init(&virtio_transport.transport);
710 if (ret)
711 goto out_vdr;
712
713 return 0;
714
715out_vdr:
716 unregister_virtio_driver(&virtio_vsock_driver);
717out_wq:
718 destroy_workqueue(virtio_vsock_workqueue);
708 return ret; 719 return ret;
720
709} 721}
710 722
711static void __exit virtio_vsock_exit(void) 723static void __exit virtio_vsock_exit(void)
712{ 724{
725 vsock_core_exit();
713 unregister_virtio_driver(&virtio_vsock_driver); 726 unregister_virtio_driver(&virtio_vsock_driver);
714 destroy_workqueue(virtio_vsock_workqueue); 727 destroy_workqueue(virtio_vsock_workqueue);
715} 728}
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index c361ce782412..c3d5ab01fba7 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
1651 1651
1652static void vmci_transport_destruct(struct vsock_sock *vsk) 1652static void vmci_transport_destruct(struct vsock_sock *vsk)
1653{ 1653{
1654 /* transport can be NULL if we hit a failure at init() time */
1655 if (!vmci_trans(vsk))
1656 return;
1657
1654 /* Ensure that the detach callback doesn't use the sk/vsk 1658 /* Ensure that the detach callback doesn't use the sk/vsk
1655 * we are about to destruct. 1659 * we are about to destruct.
1656 */ 1660 */
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 882d97bdc6bf..550ac9d827fe 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
41 cfg80211_sched_dfs_chan_update(rdev); 41 cfg80211_sched_dfs_chan_update(rdev);
42 } 42 }
43 43
44 schedule_work(&cfg80211_disconnect_work);
45
44 return err; 46 return err;
45} 47}
46 48
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 623dfe5e211c..b36ad8efb5e5 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1068,6 +1068,8 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync)
1068 1068
1069 ASSERT_RTNL(); 1069 ASSERT_RTNL();
1070 1070
1071 flush_work(&wdev->pmsr_free_wk);
1072
1071 nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); 1073 nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
1072 1074
1073 list_del_rcu(&wdev->list); 1075 list_del_rcu(&wdev->list);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index c5d6f3418601..f6b40563dc63 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -445,6 +445,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
445bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, 445bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
446 u32 center_freq_khz, u32 bw_khz); 446 u32 center_freq_khz, u32 bw_khz);
447 447
448extern struct work_struct cfg80211_disconnect_work;
449
448/** 450/**
449 * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable 451 * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
450 * @wiphy: the wiphy to validate against 452 * @wiphy: the wiphy to validate against
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5e49492d5911..d91a408db113 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -250,7 +250,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
250 [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = 250 [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] =
251 NLA_POLICY_MAX(NLA_U8, 15), 251 NLA_POLICY_MAX(NLA_U8, 15),
252 [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = 252 [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] =
253 NLA_POLICY_MAX(NLA_U8, 15), 253 NLA_POLICY_MAX(NLA_U8, 31),
254 [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, 254 [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 },
255 [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, 255 [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG },
256 [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, 256 [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG },
@@ -555,7 +555,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
555 }, 555 },
556 [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), 556 [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1),
557 [NL80211_ATTR_PEER_MEASUREMENTS] = 557 [NL80211_ATTR_PEER_MEASUREMENTS] =
558 NLA_POLICY_NESTED(NL80211_PMSR_FTM_REQ_ATTR_MAX, 558 NLA_POLICY_NESTED(NL80211_PMSR_ATTR_MAX,
559 nl80211_pmsr_attr_policy), 559 nl80211_pmsr_attr_policy),
560}; 560};
561 561
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index de9286703280..0216ab555249 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -256,8 +256,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
256 if (err) 256 if (err)
257 goto out_err; 257 goto out_err;
258 } else { 258 } else {
259 memcpy(req->mac_addr, nla_data(info->attrs[NL80211_ATTR_MAC]), 259 memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
260 ETH_ALEN);
261 memset(req->mac_addr_mask, 0xff, ETH_ALEN); 260 memset(req->mac_addr_mask, 0xff, ETH_ALEN);
262 } 261 }
263 262
@@ -272,6 +271,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
272 271
273 req->n_peers = count; 272 req->n_peers = count;
274 req->cookie = cfg80211_assign_cookie(rdev); 273 req->cookie = cfg80211_assign_cookie(rdev);
274 req->nl_portid = info->snd_portid;
275 275
276 err = rdev_start_pmsr(rdev, wdev, req); 276 err = rdev_start_pmsr(rdev, wdev, req);
277 if (err) 277 if (err)
@@ -530,14 +530,14 @@ free:
530} 530}
531EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); 531EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
532 532
533void cfg80211_pmsr_free_wk(struct work_struct *work) 533static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev)
534{ 534{
535 struct wireless_dev *wdev = container_of(work, struct wireless_dev,
536 pmsr_free_wk);
537 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 535 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
538 struct cfg80211_pmsr_request *req, *tmp; 536 struct cfg80211_pmsr_request *req, *tmp;
539 LIST_HEAD(free_list); 537 LIST_HEAD(free_list);
540 538
539 lockdep_assert_held(&wdev->mtx);
540
541 spin_lock_bh(&wdev->pmsr_lock); 541 spin_lock_bh(&wdev->pmsr_lock);
542 list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { 542 list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
543 if (req->nl_portid) 543 if (req->nl_portid)
@@ -547,14 +547,22 @@ void cfg80211_pmsr_free_wk(struct work_struct *work)
547 spin_unlock_bh(&wdev->pmsr_lock); 547 spin_unlock_bh(&wdev->pmsr_lock);
548 548
549 list_for_each_entry_safe(req, tmp, &free_list, list) { 549 list_for_each_entry_safe(req, tmp, &free_list, list) {
550 wdev_lock(wdev);
551 rdev_abort_pmsr(rdev, wdev, req); 550 rdev_abort_pmsr(rdev, wdev, req);
552 wdev_unlock(wdev);
553 551
554 kfree(req); 552 kfree(req);
555 } 553 }
556} 554}
557 555
556void cfg80211_pmsr_free_wk(struct work_struct *work)
557{
558 struct wireless_dev *wdev = container_of(work, struct wireless_dev,
559 pmsr_free_wk);
560
561 wdev_lock(wdev);
562 cfg80211_pmsr_process_abort(wdev);
563 wdev_unlock(wdev);
564}
565
558void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) 566void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
559{ 567{
560 struct cfg80211_pmsr_request *req; 568 struct cfg80211_pmsr_request *req;
@@ -568,8 +576,8 @@ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
568 spin_unlock_bh(&wdev->pmsr_lock); 576 spin_unlock_bh(&wdev->pmsr_lock);
569 577
570 if (found) 578 if (found)
571 schedule_work(&wdev->pmsr_free_wk); 579 cfg80211_pmsr_process_abort(wdev);
572 flush_work(&wdev->pmsr_free_wk); 580
573 WARN_ON(!list_empty(&wdev->pmsr_list)); 581 WARN_ON(!list_empty(&wdev->pmsr_list));
574} 582}
575 583
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ecfb1a06dbb2..dd58b9909ac9 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1024,8 +1024,13 @@ static void regdb_fw_cb(const struct firmware *fw, void *context)
1024 } 1024 }
1025 1025
1026 rtnl_lock(); 1026 rtnl_lock();
1027 if (WARN_ON(regdb && !IS_ERR(regdb))) { 1027 if (regdb && !IS_ERR(regdb)) {
1028 /* just restore and free new db */ 1028 /* negative case - a bug
1029 * positive case - can happen due to race in case of multiple cb's in
1030 * queue, due to usage of asynchronous callback
1031 *
1032 * Either case, just restore and free new db.
1033 */
1029 } else if (set_error) { 1034 } else if (set_error) {
1030 regdb = ERR_PTR(set_error); 1035 regdb = ERR_PTR(set_error);
1031 } else if (fw) { 1036 } else if (fw) {
@@ -1255,7 +1260,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
1255 * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), 1260 * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
1256 * however it is safe for now to assume that a frequency rule should not be 1261 * however it is safe for now to assume that a frequency rule should not be
1257 * part of a frequency's band if the start freq or end freq are off by more 1262 * part of a frequency's band if the start freq or end freq are off by more
1258 * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the 1263 * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
1259 * 60 GHz band. 1264 * 60 GHz band.
1260 * This resolution can be lowered and should be considered as we add 1265 * This resolution can be lowered and should be considered as we add
1261 * regulatory rule support for other "bands". 1266 * regulatory rule support for other "bands".
@@ -1270,7 +1275,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
1270 * with the Channel starting frequency above 45 GHz. 1275 * with the Channel starting frequency above 45 GHz.
1271 */ 1276 */
1272 u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ? 1277 u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
1273 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; 1278 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
1274 if (abs(freq_khz - freq_range->start_freq_khz) <= limit) 1279 if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
1275 return true; 1280 return true;
1276 if (abs(freq_khz - freq_range->end_freq_khz) <= limit) 1281 if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f741d8376a46..7d34cb884840 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work)
667 rtnl_unlock(); 667 rtnl_unlock();
668} 668}
669 669
670static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); 670DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
671 671
672 672
673/* 673/*
diff --git a/net/wireless/util.c b/net/wireless/util.c
index cd48cdd582c0..ec30e3732c7b 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -5,7 +5,7 @@
5 * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright 2017 Intel Deutschland GmbH 7 * Copyright 2017 Intel Deutschland GmbH
8 * Copyright (C) 2018 Intel Corporation 8 * Copyright (C) 2018-2019 Intel Corporation
9 */ 9 */
10#include <linux/export.h> 10#include <linux/export.h>
11#include <linux/bitops.h> 11#include <linux/bitops.h>
@@ -19,6 +19,7 @@
19#include <linux/mpls.h> 19#include <linux/mpls.h>
20#include <linux/gcd.h> 20#include <linux/gcd.h>
21#include <linux/bitfield.h> 21#include <linux/bitfield.h>
22#include <linux/nospec.h>
22#include "core.h" 23#include "core.h"
23#include "rdev-ops.h" 24#include "rdev-ops.h"
24 25
@@ -715,20 +716,25 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
715{ 716{
716 unsigned int dscp; 717 unsigned int dscp;
717 unsigned char vlan_priority; 718 unsigned char vlan_priority;
719 unsigned int ret;
718 720
719 /* skb->priority values from 256->263 are magic values to 721 /* skb->priority values from 256->263 are magic values to
720 * directly indicate a specific 802.1d priority. This is used 722 * directly indicate a specific 802.1d priority. This is used
721 * to allow 802.1d priority to be passed directly in from VLAN 723 * to allow 802.1d priority to be passed directly in from VLAN
722 * tags, etc. 724 * tags, etc.
723 */ 725 */
724 if (skb->priority >= 256 && skb->priority <= 263) 726 if (skb->priority >= 256 && skb->priority <= 263) {
725 return skb->priority - 256; 727 ret = skb->priority - 256;
728 goto out;
729 }
726 730
727 if (skb_vlan_tag_present(skb)) { 731 if (skb_vlan_tag_present(skb)) {
728 vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) 732 vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK)
729 >> VLAN_PRIO_SHIFT; 733 >> VLAN_PRIO_SHIFT;
730 if (vlan_priority > 0) 734 if (vlan_priority > 0) {
731 return vlan_priority; 735 ret = vlan_priority;
736 goto out;
737 }
732 } 738 }
733 739
734 switch (skb->protocol) { 740 switch (skb->protocol) {
@@ -747,8 +753,9 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
747 if (!mpls) 753 if (!mpls)
748 return 0; 754 return 0;
749 755
750 return (ntohl(mpls->entry) & MPLS_LS_TC_MASK) 756 ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK)
751 >> MPLS_LS_TC_SHIFT; 757 >> MPLS_LS_TC_SHIFT;
758 goto out;
752 } 759 }
753 case htons(ETH_P_80221): 760 case htons(ETH_P_80221):
754 /* 802.21 is always network control traffic */ 761 /* 802.21 is always network control traffic */
@@ -761,18 +768,24 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
761 unsigned int i, tmp_dscp = dscp >> 2; 768 unsigned int i, tmp_dscp = dscp >> 2;
762 769
763 for (i = 0; i < qos_map->num_des; i++) { 770 for (i = 0; i < qos_map->num_des; i++) {
764 if (tmp_dscp == qos_map->dscp_exception[i].dscp) 771 if (tmp_dscp == qos_map->dscp_exception[i].dscp) {
765 return qos_map->dscp_exception[i].up; 772 ret = qos_map->dscp_exception[i].up;
773 goto out;
774 }
766 } 775 }
767 776
768 for (i = 0; i < 8; i++) { 777 for (i = 0; i < 8; i++) {
769 if (tmp_dscp >= qos_map->up[i].low && 778 if (tmp_dscp >= qos_map->up[i].low &&
770 tmp_dscp <= qos_map->up[i].high) 779 tmp_dscp <= qos_map->up[i].high) {
771 return i; 780 ret = i;
781 goto out;
782 }
772 } 783 }
773 } 784 }
774 785
775 return dscp >> 5; 786 ret = dscp >> 5;
787out:
788 return array_index_nospec(ret, IEEE80211_NUM_TIDS);
776} 789}
777EXPORT_SYMBOL(cfg80211_classify8021d); 790EXPORT_SYMBOL(cfg80211_classify8021d);
778 791
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5121729b8b63..eff31348e20b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
352 unsigned int lci = 1; 352 unsigned int lci = 1;
353 struct sock *sk; 353 struct sock *sk;
354 354
355 read_lock_bh(&x25_list_lock); 355 while ((sk = x25_find_socket(lci, nb)) != NULL) {
356
357 while ((sk = __x25_find_socket(lci, nb)) != NULL) {
358 sock_put(sk); 356 sock_put(sk);
359 if (++lci == 4096) { 357 if (++lci == 4096) {
360 lci = 0; 358 lci = 0;
361 break; 359 break;
362 } 360 }
361 cond_resched();
363 } 362 }
364 363
365 read_unlock_bh(&x25_list_lock);
366 return lci; 364 return lci;
367} 365}
368 366
@@ -681,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
681 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; 679 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
682 int len, i, rc = 0; 680 int len, i, rc = 0;
683 681
684 if (!sock_flag(sk, SOCK_ZAPPED) || 682 if (addr_len != sizeof(struct sockaddr_x25) ||
685 addr_len != sizeof(struct sockaddr_x25) ||
686 addr->sx25_family != AF_X25) { 683 addr->sx25_family != AF_X25) {
687 rc = -EINVAL; 684 rc = -EINVAL;
688 goto out; 685 goto out;
@@ -701,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
701 } 698 }
702 699
703 lock_sock(sk); 700 lock_sock(sk);
704 x25_sk(sk)->source_addr = addr->sx25_addr; 701 if (sock_flag(sk, SOCK_ZAPPED)) {
705 x25_insert_socket(sk); 702 x25_sk(sk)->source_addr = addr->sx25_addr;
706 sock_reset_flag(sk, SOCK_ZAPPED); 703 x25_insert_socket(sk);
704 sock_reset_flag(sk, SOCK_ZAPPED);
705 } else {
706 rc = -EINVAL;
707 }
707 release_sock(sk); 708 release_sock(sk);
708 SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); 709 SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
709out: 710out:
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a264cf2accd0..37e1fe180769 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
41 * not know if the device has more tx queues than rx, or the opposite. 41 * not know if the device has more tx queues than rx, or the opposite.
42 * This might also change during run time. 42 * This might also change during run time.
43 */ 43 */
44static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, 44static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
45 u16 queue_id) 45 u16 queue_id)
46{ 46{
47 if (queue_id >= max_t(unsigned int,
48 dev->real_num_rx_queues,
49 dev->real_num_tx_queues))
50 return -EINVAL;
51
47 if (queue_id < dev->real_num_rx_queues) 52 if (queue_id < dev->real_num_rx_queues)
48 dev->_rx[queue_id].umem = umem; 53 dev->_rx[queue_id].umem = umem;
49 if (queue_id < dev->real_num_tx_queues) 54 if (queue_id < dev->real_num_tx_queues)
50 dev->_tx[queue_id].umem = umem; 55 dev->_tx[queue_id].umem = umem;
56
57 return 0;
51} 58}
52 59
53struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, 60struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
@@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
88 goto out_rtnl_unlock; 95 goto out_rtnl_unlock;
89 } 96 }
90 97
91 xdp_reg_umem_at_qid(dev, umem, queue_id); 98 err = xdp_reg_umem_at_qid(dev, umem, queue_id);
99 if (err)
100 goto out_rtnl_unlock;
101
92 umem->dev = dev; 102 umem->dev = dev;
93 umem->queue_id = queue_id; 103 umem->queue_id = queue_id;
94 if (force_copy) 104 if (force_copy)
@@ -115,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
115 return 0; 125 return 0;
116 126
117err_unreg_umem: 127err_unreg_umem:
118 xdp_clear_umem_at_qid(dev, queue_id);
119 if (!force_zc) 128 if (!force_zc)
120 err = 0; /* fallback to copy mode */ 129 err = 0; /* fallback to copy mode */
130 if (err)
131 xdp_clear_umem_at_qid(dev, queue_id);
121out_rtnl_unlock: 132out_rtnl_unlock:
122 rtnl_unlock(); 133 rtnl_unlock();
123 return err; 134 return err;
@@ -249,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
249 if (!umem->pgs) 260 if (!umem->pgs)
250 return -ENOMEM; 261 return -ENOMEM;
251 262
252 down_write(&current->mm->mmap_sem); 263 down_read(&current->mm->mmap_sem);
253 npgs = get_user_pages(umem->address, umem->npgs, 264 npgs = get_user_pages_longterm(umem->address, umem->npgs,
254 gup_flags, &umem->pgs[0], NULL); 265 gup_flags, &umem->pgs[0], NULL);
255 up_write(&current->mm->mmap_sem); 266 up_read(&current->mm->mmap_sem);
256 267
257 if (npgs != umem->npgs) { 268 if (npgs != umem->npgs) {
258 if (npgs >= 0) { 269 if (npgs >= 0) {
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a03268454a27..85e4fe4f18cc 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock)
366 366
367 xskq_destroy(xs->rx); 367 xskq_destroy(xs->rx);
368 xskq_destroy(xs->tx); 368 xskq_destroy(xs->tx);
369 xdp_put_umem(xs->umem);
370 369
371 sock_orphan(sk); 370 sock_orphan(sk);
372 sock->sk = NULL; 371 sock->sk = NULL;
@@ -669,6 +668,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
669 if (!umem) 668 if (!umem)
670 return -EINVAL; 669 return -EINVAL;
671 670
671 /* Matches the smp_wmb() in XDP_UMEM_REG */
672 smp_rmb();
672 if (offset == XDP_UMEM_PGOFF_FILL_RING) 673 if (offset == XDP_UMEM_PGOFF_FILL_RING)
673 q = READ_ONCE(umem->fq); 674 q = READ_ONCE(umem->fq);
674 else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) 675 else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
@@ -678,6 +679,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
678 if (!q) 679 if (!q)
679 return -EINVAL; 680 return -EINVAL;
680 681
682 /* Matches the smp_wmb() in xsk_init_queue */
683 smp_rmb();
681 qpg = virt_to_head_page(q->ring); 684 qpg = virt_to_head_page(q->ring);
682 if (size > (PAGE_SIZE << compound_order(qpg))) 685 if (size > (PAGE_SIZE << compound_order(qpg)))
683 return -EINVAL; 686 return -EINVAL;
@@ -714,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = {
714 .sendpage = sock_no_sendpage, 717 .sendpage = sock_no_sendpage,
715}; 718};
716 719
720static void xsk_destruct(struct sock *sk)
721{
722 struct xdp_sock *xs = xdp_sk(sk);
723
724 if (!sock_flag(sk, SOCK_DEAD))
725 return;
726
727 xdp_put_umem(xs->umem);
728
729 sk_refcnt_debug_dec(sk);
730}
731
717static int xsk_create(struct net *net, struct socket *sock, int protocol, 732static int xsk_create(struct net *net, struct socket *sock, int protocol,
718 int kern) 733 int kern)
719{ 734{
@@ -740,6 +755,9 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
740 755
741 sk->sk_family = PF_XDP; 756 sk->sk_family = PF_XDP;
742 757
758 sk->sk_destruct = xsk_destruct;
759 sk_refcnt_debug_inc(sk);
760
743 sock_set_flag(sk, SOCK_RCU_FREE); 761 sock_set_flag(sk, SOCK_RCU_FREE);
744 762
745 xs = xdp_sk(sk); 763 xs = xdp_sk(sk);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 6be8c7df15bb..dbb3c1945b5c 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
76 int ifindex; 76 int ifindex;
77 struct xfrm_if *xi; 77 struct xfrm_if *xi;
78 78
79 if (!skb->dev) 79 if (!secpath_exists(skb) || !skb->dev)
80 return NULL; 80 return NULL;
81 81
82 xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id); 82 xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
83 ifindex = skb->dev->ifindex; 83 ifindex = skb->dev->ifindex;
84 84
85 for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { 85 for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 934492bad8e0..8d1a898d0ba5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -680,16 +680,6 @@ static void xfrm_hash_resize(struct work_struct *work)
680 mutex_unlock(&hash_resize_mutex); 680 mutex_unlock(&hash_resize_mutex);
681} 681}
682 682
683static void xfrm_hash_reset_inexact_table(struct net *net)
684{
685 struct xfrm_pol_inexact_bin *b;
686
687 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
688
689 list_for_each_entry(b, &net->xfrm.inexact_bins, inexact_bins)
690 INIT_HLIST_HEAD(&b->hhead);
691}
692
693/* Make sure *pol can be inserted into fastbin. 683/* Make sure *pol can be inserted into fastbin.
694 * Useful to check that later insert requests will be sucessful 684 * Useful to check that later insert requests will be sucessful
695 * (provided xfrm_policy_lock is held throughout). 685 * (provided xfrm_policy_lock is held throughout).
@@ -833,13 +823,13 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
833 u16 family) 823 u16 family)
834{ 824{
835 unsigned int matched_s, matched_d; 825 unsigned int matched_s, matched_d;
836 struct hlist_node *newpos = NULL;
837 struct xfrm_policy *policy, *p; 826 struct xfrm_policy *policy, *p;
838 827
839 matched_s = 0; 828 matched_s = 0;
840 matched_d = 0; 829 matched_d = 0;
841 830
842 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { 831 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
832 struct hlist_node *newpos = NULL;
843 bool matches_s, matches_d; 833 bool matches_s, matches_d;
844 834
845 if (!policy->bydst_reinsert) 835 if (!policy->bydst_reinsert)
@@ -849,16 +839,19 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
849 839
850 policy->bydst_reinsert = false; 840 policy->bydst_reinsert = false;
851 hlist_for_each_entry(p, &n->hhead, bydst) { 841 hlist_for_each_entry(p, &n->hhead, bydst) {
852 if (policy->priority >= p->priority) 842 if (policy->priority > p->priority)
843 newpos = &p->bydst;
844 else if (policy->priority == p->priority &&
845 policy->pos > p->pos)
853 newpos = &p->bydst; 846 newpos = &p->bydst;
854 else 847 else
855 break; 848 break;
856 } 849 }
857 850
858 if (newpos) 851 if (newpos)
859 hlist_add_behind(&policy->bydst, newpos); 852 hlist_add_behind_rcu(&policy->bydst, newpos);
860 else 853 else
861 hlist_add_head(&policy->bydst, &n->hhead); 854 hlist_add_head_rcu(&policy->bydst, &n->hhead);
862 855
863 /* paranoia checks follow. 856 /* paranoia checks follow.
864 * Check that the reinserted policy matches at least 857 * Check that the reinserted policy matches at least
@@ -893,12 +886,13 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
893 struct rb_root *new, 886 struct rb_root *new,
894 u16 family) 887 u16 family)
895{ 888{
896 struct rb_node **p, *parent = NULL;
897 struct xfrm_pol_inexact_node *node; 889 struct xfrm_pol_inexact_node *node;
890 struct rb_node **p, *parent;
898 891
899 /* we should not have another subtree here */ 892 /* we should not have another subtree here */
900 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root)); 893 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
901 894restart:
895 parent = NULL;
902 p = &new->rb_node; 896 p = &new->rb_node;
903 while (*p) { 897 while (*p) {
904 u8 prefixlen; 898 u8 prefixlen;
@@ -918,12 +912,11 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
918 } else { 912 } else {
919 struct xfrm_policy *tmp; 913 struct xfrm_policy *tmp;
920 914
921 hlist_for_each_entry(tmp, &node->hhead, bydst) 915 hlist_for_each_entry(tmp, &n->hhead, bydst) {
922 tmp->bydst_reinsert = true;
923 hlist_for_each_entry(tmp, &n->hhead, bydst)
924 tmp->bydst_reinsert = true; 916 tmp->bydst_reinsert = true;
917 hlist_del_rcu(&tmp->bydst);
918 }
925 919
926 INIT_HLIST_HEAD(&node->hhead);
927 xfrm_policy_inexact_list_reinsert(net, node, family); 920 xfrm_policy_inexact_list_reinsert(net, node, family);
928 921
929 if (node->prefixlen == n->prefixlen) { 922 if (node->prefixlen == n->prefixlen) {
@@ -935,8 +928,7 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
935 kfree_rcu(n, rcu); 928 kfree_rcu(n, rcu);
936 n = node; 929 n = node;
937 n->prefixlen = prefixlen; 930 n->prefixlen = prefixlen;
938 *p = new->rb_node; 931 goto restart;
939 parent = NULL;
940 } 932 }
941 } 933 }
942 934
@@ -965,12 +957,11 @@ static void xfrm_policy_inexact_node_merge(struct net *net,
965 family); 957 family);
966 } 958 }
967 959
968 hlist_for_each_entry(tmp, &v->hhead, bydst) 960 hlist_for_each_entry(tmp, &v->hhead, bydst) {
969 tmp->bydst_reinsert = true;
970 hlist_for_each_entry(tmp, &n->hhead, bydst)
971 tmp->bydst_reinsert = true; 961 tmp->bydst_reinsert = true;
962 hlist_del_rcu(&tmp->bydst);
963 }
972 964
973 INIT_HLIST_HEAD(&n->hhead);
974 xfrm_policy_inexact_list_reinsert(net, n, family); 965 xfrm_policy_inexact_list_reinsert(net, n, family);
975} 966}
976 967
@@ -1235,6 +1226,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
1235 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); 1226 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1236 1227
1237 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 1228 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1229 write_seqcount_begin(&xfrm_policy_hash_generation);
1238 1230
1239 /* make sure that we can insert the indirect policies again before 1231 /* make sure that we can insert the indirect policies again before
1240 * we start with destructive action. 1232 * we start with destructive action.
@@ -1278,10 +1270,14 @@ static void xfrm_hash_rebuild(struct work_struct *work)
1278 } 1270 }
1279 1271
1280 /* reset the bydst and inexact table in all directions */ 1272 /* reset the bydst and inexact table in all directions */
1281 xfrm_hash_reset_inexact_table(net);
1282
1283 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 1273 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1284 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 1274 struct hlist_node *n;
1275
1276 hlist_for_each_entry_safe(policy, n,
1277 &net->xfrm.policy_inexact[dir],
1278 bydst_inexact_list)
1279 hlist_del_init(&policy->bydst_inexact_list);
1280
1285 hmask = net->xfrm.policy_bydst[dir].hmask; 1281 hmask = net->xfrm.policy_bydst[dir].hmask;
1286 odst = net->xfrm.policy_bydst[dir].table; 1282 odst = net->xfrm.policy_bydst[dir].table;
1287 for (i = hmask; i >= 0; i--) 1283 for (i = hmask; i >= 0; i--)
@@ -1313,6 +1309,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
1313 newpos = NULL; 1309 newpos = NULL;
1314 chain = policy_hash_bysel(net, &policy->selector, 1310 chain = policy_hash_bysel(net, &policy->selector,
1315 policy->family, dir); 1311 policy->family, dir);
1312
1313 hlist_del_rcu(&policy->bydst);
1314
1316 if (!chain) { 1315 if (!chain) {
1317 void *p = xfrm_policy_inexact_insert(policy, dir, 0); 1316 void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1318 1317
@@ -1334,6 +1333,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
1334 1333
1335out_unlock: 1334out_unlock:
1336 __xfrm_policy_inexact_flush(net); 1335 __xfrm_policy_inexact_flush(net);
1336 write_seqcount_end(&xfrm_policy_hash_generation);
1337 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 1337 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1338 1338
1339 mutex_unlock(&hash_resize_mutex); 1339 mutex_unlock(&hash_resize_mutex);
@@ -2600,7 +2600,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2600 dst_copy_metrics(dst1, dst); 2600 dst_copy_metrics(dst1, dst);
2601 2601
2602 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 2602 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2603 __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); 2603 __u32 mark = 0;
2604
2605 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2606 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2604 2607
2605 family = xfrm[i]->props.family; 2608 family = xfrm[i]->props.family;
2606 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, 2609 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
@@ -3311,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3311 3314
3312 if (ifcb) { 3315 if (ifcb) {
3313 xi = ifcb->decode_session(skb); 3316 xi = ifcb->decode_session(skb);
3314 if (xi) 3317 if (xi) {
3315 if_id = xi->p.if_id; 3318 if_id = xi->p.if_id;
3319 net = xi->net;
3320 }
3316 } 3321 }
3317 rcu_read_unlock(); 3322 rcu_read_unlock();
3318 3323
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 23c92891758a..1bb971f46fc6 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x)
432} 432}
433EXPORT_SYMBOL(xfrm_state_free); 433EXPORT_SYMBOL(xfrm_state_free);
434 434
435static void xfrm_state_gc_destroy(struct xfrm_state *x) 435static void ___xfrm_state_destroy(struct xfrm_state *x)
436{ 436{
437 tasklet_hrtimer_cancel(&x->mtimer); 437 tasklet_hrtimer_cancel(&x->mtimer);
438 del_timer_sync(&x->rtimer); 438 del_timer_sync(&x->rtimer);
@@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
474 synchronize_rcu(); 474 synchronize_rcu();
475 475
476 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) 476 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
477 xfrm_state_gc_destroy(x); 477 ___xfrm_state_destroy(x);
478} 478}
479 479
480static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) 480static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
@@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
598} 598}
599EXPORT_SYMBOL(xfrm_state_alloc); 599EXPORT_SYMBOL(xfrm_state_alloc);
600 600
601void __xfrm_state_destroy(struct xfrm_state *x) 601void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
602{ 602{
603 WARN_ON(x->km.state != XFRM_STATE_DEAD); 603 WARN_ON(x->km.state != XFRM_STATE_DEAD);
604 604
605 spin_lock_bh(&xfrm_state_gc_lock); 605 if (sync) {
606 hlist_add_head(&x->gclist, &xfrm_state_gc_list); 606 synchronize_rcu();
607 spin_unlock_bh(&xfrm_state_gc_lock); 607 ___xfrm_state_destroy(x);
608 schedule_work(&xfrm_state_gc_work); 608 } else {
609 spin_lock_bh(&xfrm_state_gc_lock);
610 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
611 spin_unlock_bh(&xfrm_state_gc_lock);
612 schedule_work(&xfrm_state_gc_work);
613 }
609} 614}
610EXPORT_SYMBOL(__xfrm_state_destroy); 615EXPORT_SYMBOL(__xfrm_state_destroy);
611 616
@@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
708} 713}
709#endif 714#endif
710 715
711int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) 716int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
712{ 717{
713 int i, err = 0, cnt = 0; 718 int i, err = 0, cnt = 0;
714 719
@@ -730,7 +735,10 @@ restart:
730 err = xfrm_state_delete(x); 735 err = xfrm_state_delete(x);
731 xfrm_audit_state_delete(x, err ? 0 : 1, 736 xfrm_audit_state_delete(x, err ? 0 : 1,
732 task_valid); 737 task_valid);
733 xfrm_state_put(x); 738 if (sync)
739 xfrm_state_put_sync(x);
740 else
741 xfrm_state_put(x);
734 if (!err) 742 if (!err)
735 cnt++; 743 cnt++;
736 744
@@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
2215 if (atomic_read(&t->tunnel_users) == 2) 2223 if (atomic_read(&t->tunnel_users) == 2)
2216 xfrm_state_delete(t); 2224 xfrm_state_delete(t);
2217 atomic_dec(&t->tunnel_users); 2225 atomic_dec(&t->tunnel_users);
2218 xfrm_state_put(t); 2226 xfrm_state_put_sync(t);
2219 x->tunnel = NULL; 2227 x->tunnel = NULL;
2220 } 2228 }
2221} 2229}
@@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net)
2375 unsigned int sz; 2383 unsigned int sz;
2376 2384
2377 flush_work(&net->xfrm.state_hash_work); 2385 flush_work(&net->xfrm.state_hash_work);
2378 xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
2379 flush_work(&xfrm_state_gc_work); 2386 flush_work(&xfrm_state_gc_work);
2387 xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
2380 2388
2381 WARN_ON(!list_empty(&net->xfrm.state_all)); 2389 WARN_ON(!list_empty(&net->xfrm.state_all));
2382 2390
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 277c1c46fe94..a131f9ff979e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1488 if (!ut[i].family) 1488 if (!ut[i].family)
1489 ut[i].family = family; 1489 ut[i].family = family;
1490 1490
1491 if ((ut[i].mode == XFRM_MODE_TRANSPORT) && 1491 switch (ut[i].mode) {
1492 (ut[i].family != prev_family)) 1492 case XFRM_MODE_TUNNEL:
1493 return -EINVAL; 1493 case XFRM_MODE_BEET:
1494 1494 break;
1495 default:
1496 if (ut[i].family != prev_family)
1497 return -EINVAL;
1498 break;
1499 }
1495 if (ut[i].mode >= XFRM_MODE_MAX) 1500 if (ut[i].mode >= XFRM_MODE_MAX)
1496 return -EINVAL; 1501 return -EINVAL;
1497 1502
@@ -1927,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1927 struct xfrm_usersa_flush *p = nlmsg_data(nlh); 1932 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1928 int err; 1933 int err;
1929 1934
1930 err = xfrm_state_flush(net, p->proto, true); 1935 err = xfrm_state_flush(net, p->proto, true, false);
1931 if (err) { 1936 if (err) {
1932 if (err == -ESRCH) /* empty table */ 1937 if (err == -ESRCH) /* empty table */
1933 return 0; 1938 return 0;
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 66ae15f27c70..db1a91dfa702 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -279,6 +279,7 @@ $(obj)/%.o: $(src)/%.c
279 -Wno-gnu-variable-sized-type-not-at-end \ 279 -Wno-gnu-variable-sized-type-not-at-end \
280 -Wno-address-of-packed-member -Wno-tautological-compare \ 280 -Wno-address-of-packed-member -Wno-tautological-compare \
281 -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ 281 -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
282 -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
282 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ 283 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
283ifeq ($(DWARF2BTF),y) 284ifeq ($(DWARF2BTF),y)
284 $(BTF_PAHOLE) -J $@ 285 $(BTF_PAHOLE) -J $@
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
new file mode 100644
index 000000000000..5cd7c1d1a5d5
--- /dev/null
+++ b/samples/bpf/asm_goto_workaround.h
@@ -0,0 +1,16 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2019 Facebook */
3#ifndef __ASM_GOTO_WORKAROUND_H
4#define __ASM_GOTO_WORKAROUND_H
5
6/* this will bring in asm_volatile_goto macro definition
7 * if enabled by compiler and config options.
8 */
9#include <linux/types.h>
10
11#ifdef asm_volatile_goto
12#undef asm_volatile_goto
13#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
14#endif
15
16#endif
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index d7b68ef5ba79..0bb6507256b7 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -77,7 +77,7 @@ static int test_foo_bar(void)
77 77
78 /* Create cgroup /foo, get fd, and join it */ 78 /* Create cgroup /foo, get fd, and join it */
79 foo = create_and_get_cgroup(FOO); 79 foo = create_and_get_cgroup(FOO);
80 if (!foo) 80 if (foo < 0)
81 goto err; 81 goto err;
82 82
83 if (join_cgroup(FOO)) 83 if (join_cgroup(FOO))
@@ -94,7 +94,7 @@ static int test_foo_bar(void)
94 94
95 /* Create cgroup /foo/bar, get fd, and join it */ 95 /* Create cgroup /foo/bar, get fd, and join it */
96 bar = create_and_get_cgroup(BAR); 96 bar = create_and_get_cgroup(BAR);
97 if (!bar) 97 if (bar < 0)
98 goto err; 98 goto err;
99 99
100 if (join_cgroup(BAR)) 100 if (join_cgroup(BAR))
@@ -298,19 +298,19 @@ static int test_multiprog(void)
298 goto err; 298 goto err;
299 299
300 cg1 = create_and_get_cgroup("/cg1"); 300 cg1 = create_and_get_cgroup("/cg1");
301 if (!cg1) 301 if (cg1 < 0)
302 goto err; 302 goto err;
303 cg2 = create_and_get_cgroup("/cg1/cg2"); 303 cg2 = create_and_get_cgroup("/cg1/cg2");
304 if (!cg2) 304 if (cg2 < 0)
305 goto err; 305 goto err;
306 cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); 306 cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
307 if (!cg3) 307 if (cg3 < 0)
308 goto err; 308 goto err;
309 cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); 309 cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
310 if (!cg4) 310 if (cg4 < 0)
311 goto err; 311 goto err;
312 cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); 312 cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
313 if (!cg5) 313 if (cg5 < 0)
314 goto err; 314 goto err;
315 315
316 if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) 316 if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
index 2259f997a26c..f082d6ac59f0 100644
--- a/samples/bpf/test_current_task_under_cgroup_user.c
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -32,7 +32,7 @@ int main(int argc, char **argv)
32 32
33 cg2 = create_and_get_cgroup(CGROUP_PATH); 33 cg2 = create_and_get_cgroup(CGROUP_PATH);
34 34
35 if (!cg2) 35 if (cg2 < 0)
36 goto err; 36 goto err;
37 37
38 if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) { 38 if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
index 0a197f86ac43..8bfda95c77ad 100644
--- a/samples/bpf/xdp1_user.c
+++ b/samples/bpf/xdp1_user.c
@@ -103,7 +103,7 @@ int main(int argc, char **argv)
103 return 1; 103 return 1;
104 } 104 }
105 105
106 ifindex = if_nametoindex(argv[1]); 106 ifindex = if_nametoindex(argv[optind]);
107 if (!ifindex) { 107 if (!ifindex) {
108 perror("if_nametoindex"); 108 perror("if_nametoindex");
109 return 1; 109 return 1;
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
index 33e67bd1dc34..32234481ad7d 100644
--- a/samples/mei/mei-amt-version.c
+++ b/samples/mei/mei-amt-version.c
@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
117 117
118 me->verbose = verbose; 118 me->verbose = verbose;
119 119
120 me->fd = open("/dev/mei", O_RDWR); 120 me->fd = open("/dev/mei0", O_RDWR);
121 if (me->fd == -1) { 121 if (me->fd == -1) {
122 mei_err(me, "Cannot establish a handle to the Intel MEI driver\n"); 122 mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
123 goto err; 123 goto err;
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
index 4920903c8009..fb43a814d4c0 100644
--- a/samples/seccomp/Makefile
+++ b/samples/seccomp/Makefile
@@ -34,6 +34,7 @@ HOSTCFLAGS_bpf-direct.o += $(MFLAG)
34HOSTCFLAGS_dropper.o += $(MFLAG) 34HOSTCFLAGS_dropper.o += $(MFLAG)
35HOSTCFLAGS_bpf-helper.o += $(MFLAG) 35HOSTCFLAGS_bpf-helper.o += $(MFLAG)
36HOSTCFLAGS_bpf-fancy.o += $(MFLAG) 36HOSTCFLAGS_bpf-fancy.o += $(MFLAG)
37HOSTCFLAGS_user-trap.o += $(MFLAG)
37HOSTLDLIBS_bpf-direct += $(MFLAG) 38HOSTLDLIBS_bpf-direct += $(MFLAG)
38HOSTLDLIBS_bpf-fancy += $(MFLAG) 39HOSTLDLIBS_bpf-fancy += $(MFLAG)
39HOSTLDLIBS_dropper += $(MFLAG) 40HOSTLDLIBS_dropper += $(MFLAG)
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 525bff667a52..30816037036e 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -24,10 +24,6 @@ depfile = $(subst $(comma),_,$(dot-target).d)
24basetarget = $(basename $(notdir $@)) 24basetarget = $(basename $(notdir $@))
25 25
26### 26###
27# filename of first prerequisite with directory and extension stripped
28baseprereq = $(basename $(notdir $<))
29
30###
31# Escape single quote for use in echo statements 27# Escape single quote for use in echo statements
32escsq = $(subst $(squote),'\$(squote)',$1) 28escsq = $(subst $(squote),'\$(squote)',$1)
33 29
diff --git a/scripts/coccinelle/api/alloc/alloc_cast.cocci b/scripts/coccinelle/api/alloc/alloc_cast.cocci
index 408ee3879f9b..18fedf7c60ed 100644
--- a/scripts/coccinelle/api/alloc/alloc_cast.cocci
+++ b/scripts/coccinelle/api/alloc/alloc_cast.cocci
@@ -32,7 +32,7 @@ type T;
32 (T *) 32 (T *)
33 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 33 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
34 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 34 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
35 dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 35 dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
36 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 36 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
37 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 37 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
38 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) 38 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
@@ -55,7 +55,7 @@ type r1.T;
55* (T *) 55* (T *)
56 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 56 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
57 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 57 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
58 dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 58 dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
59 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 59 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
60 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 60 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
61 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) 61 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
@@ -78,7 +78,7 @@ type r1.T;
78- (T *) 78- (T *)
79 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 79 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
80 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 80 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
81 dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 81 dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
82 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 82 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
83 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 83 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
84 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) 84 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
@@ -95,7 +95,7 @@ position p;
95 (T@p *) 95 (T@p *)
96 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 96 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
97 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 97 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
98 dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 98 dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
99 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 99 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
100 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 100 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
101 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) 101 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
diff --git a/scripts/coccinelle/api/alloc/zalloc-simple.cocci b/scripts/coccinelle/api/alloc/zalloc-simple.cocci
index d819275b7fde..5cd1991c582e 100644
--- a/scripts/coccinelle/api/alloc/zalloc-simple.cocci
+++ b/scripts/coccinelle/api/alloc/zalloc-simple.cocci
@@ -69,15 +69,6 @@ statement S;
69- x = (T)vmalloc(E1); 69- x = (T)vmalloc(E1);
70+ x = (T)vzalloc(E1); 70+ x = (T)vzalloc(E1);
71| 71|
72- x = dma_alloc_coherent(E2,E1,E3,E4);
73+ x = dma_zalloc_coherent(E2,E1,E3,E4);
74|
75- x = (T *)dma_alloc_coherent(E2,E1,E3,E4);
76+ x = dma_zalloc_coherent(E2,E1,E3,E4);
77|
78- x = (T)dma_alloc_coherent(E2,E1,E3,E4);
79+ x = (T)dma_zalloc_coherent(E2,E1,E3,E4);
80|
81- x = kmalloc_node(E1,E2,E3); 72- x = kmalloc_node(E1,E2,E3);
82+ x = kzalloc_node(E1,E2,E3); 73+ x = kzalloc_node(E1,E2,E3);
83| 74|
@@ -225,7 +216,7 @@ p << r2.p;
225x << r2.x; 216x << r2.x;
226@@ 217@@
227 218
228msg="WARNING: dma_zalloc_coherent should be used for %s, instead of dma_alloc_coherent/memset" % (x) 219msg="WARNING: dma_alloc_coherent use in %s already zeroes out memory, so memset is not needed" % (x)
229coccilib.report.print_report(p[0], msg) 220coccilib.report.print_report(p[0], msg)
230 221
231//----------------------------------------------------------------- 222//-----------------------------------------------------------------
diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
index de70b8470971..89c47f57d1ce 100644
--- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
+++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
@@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
13 for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { 13 for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
14 const char *sym; 14 const char *sym;
15 rtx body; 15 rtx body;
16 rtx masked_sp; 16 rtx mask, masked_sp;
17 17
18 /* 18 /*
19 * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard 19 * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
@@ -33,12 +33,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
33 * produces the address of the copy of the stack canary value 33 * produces the address of the copy of the stack canary value
34 * stored in struct thread_info 34 * stored in struct thread_info
35 */ 35 */
36 mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
36 masked_sp = gen_reg_rtx(Pmode); 37 masked_sp = gen_reg_rtx(Pmode);
37 38
38 emit_insn_before(gen_rtx_SET(masked_sp, 39 emit_insn_before(gen_rtx_SET(masked_sp,
39 gen_rtx_AND(Pmode, 40 gen_rtx_AND(Pmode,
40 stack_pointer_rtx, 41 stack_pointer_rtx,
41 GEN_INT(sp_mask))), 42 mask)),
42 insn); 43 insn);
43 44
44 SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp, 45 SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
@@ -52,6 +53,19 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
52#define NO_GATE 53#define NO_GATE
53#include "gcc-generate-rtl-pass.h" 54#include "gcc-generate-rtl-pass.h"
54 55
56#if BUILDING_GCC_VERSION >= 9000
57static bool no(void)
58{
59 return false;
60}
61
62static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data)
63{
64 targetm.have_stack_protect_combined_set = no;
65 targetm.have_stack_protect_combined_test = no;
66}
67#endif
68
55__visible int plugin_init(struct plugin_name_args *plugin_info, 69__visible int plugin_init(struct plugin_name_args *plugin_info,
56 struct plugin_gcc_version *version) 70 struct plugin_gcc_version *version)
57{ 71{
@@ -99,5 +113,10 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
99 register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, 113 register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP,
100 NULL, &arm_pertask_ssp_rtl_pass_info); 114 NULL, &arm_pertask_ssp_rtl_pass_info);
101 115
116#if BUILDING_GCC_VERSION >= 9000
117 register_callback(plugin_info->base_name, PLUGIN_START_UNIT,
118 arm_pertask_ssp_start_unit, NULL);
119#endif
120
102 return 0; 121 return 0;
103} 122}
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 77cebad0474e..f75e7bda4889 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -118,8 +118,8 @@ static int read_symbol(FILE *in, struct sym_entry *s)
118 fprintf(stderr, "Read error or end of file.\n"); 118 fprintf(stderr, "Read error or end of file.\n");
119 return -1; 119 return -1;
120 } 120 }
121 if (strlen(sym) > KSYM_NAME_LEN) { 121 if (strlen(sym) >= KSYM_NAME_LEN) {
122 fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n" 122 fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n"
123 "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n", 123 "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
124 sym, strlen(sym), KSYM_NAME_LEN); 124 sym, strlen(sym), KSYM_NAME_LEN);
125 return -1; 125 return -1;
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index c05ab001b54c..181973509a05 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -206,4 +206,4 @@ filechk_conf_cfg = $(CONFIG_SHELL) $<
206$(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE 206$(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE
207 $(call filechk,conf_cfg) 207 $(call filechk,conf_cfg)
208 208
209clean-files += conf-cfg 209clean-files += *conf-cfg
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0de2fb236640..26bf886bd168 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -2185,7 +2185,7 @@ static void add_intree_flag(struct buffer *b, int is_intree)
2185/* Cannot check for assembler */ 2185/* Cannot check for assembler */
2186static void add_retpoline(struct buffer *b) 2186static void add_retpoline(struct buffer *b)
2187{ 2187{
2188 buf_printf(b, "\n#ifdef RETPOLINE\n"); 2188 buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
2189 buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n"); 2189 buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
2190 buf_printf(b, "#endif\n"); 2190 buf_printf(b, "#endif\n");
2191} 2191}
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 08c88de0ffda..11975ec8d566 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -1444,7 +1444,10 @@ check:
1444 new = aa_label_merge(label, target, GFP_KERNEL); 1444 new = aa_label_merge(label, target, GFP_KERNEL);
1445 if (IS_ERR_OR_NULL(new)) { 1445 if (IS_ERR_OR_NULL(new)) {
1446 info = "failed to build target label"; 1446 info = "failed to build target label";
1447 error = PTR_ERR(new); 1447 if (!new)
1448 error = -ENOMEM;
1449 else
1450 error = PTR_ERR(new);
1448 new = NULL; 1451 new = NULL;
1449 perms.allow = 0; 1452 perms.allow = 0;
1450 goto audit; 1453 goto audit;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 2c010874329f..8db1731d046a 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -1599,12 +1599,14 @@ static unsigned int apparmor_ipv4_postroute(void *priv,
1599 return apparmor_ip_postroute(priv, skb, state); 1599 return apparmor_ip_postroute(priv, skb, state);
1600} 1600}
1601 1601
1602#if IS_ENABLED(CONFIG_IPV6)
1602static unsigned int apparmor_ipv6_postroute(void *priv, 1603static unsigned int apparmor_ipv6_postroute(void *priv,
1603 struct sk_buff *skb, 1604 struct sk_buff *skb,
1604 const struct nf_hook_state *state) 1605 const struct nf_hook_state *state)
1605{ 1606{
1606 return apparmor_ip_postroute(priv, skb, state); 1607 return apparmor_ip_postroute(priv, skb, state);
1607} 1608}
1609#endif
1608 1610
1609static const struct nf_hook_ops apparmor_nf_ops[] = { 1611static const struct nf_hook_ops apparmor_nf_ops[] = {
1610 { 1612 {
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 479909b858c7..8f533c81aa8d 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -186,20 +186,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm)
186 return key_task_permission(key_ref, current_cred(), perm); 186 return key_task_permission(key_ref, current_cred(), perm);
187} 187}
188 188
189/*
190 * Authorisation record for request_key().
191 */
192struct request_key_auth {
193 struct key *target_key;
194 struct key *dest_keyring;
195 const struct cred *cred;
196 void *callout_info;
197 size_t callout_len;
198 pid_t pid;
199} __randomize_layout;
200
201extern struct key_type key_type_request_key_auth; 189extern struct key_type key_type_request_key_auth;
202extern struct key *request_key_auth_new(struct key *target, 190extern struct key *request_key_auth_new(struct key *target,
191 const char *op,
203 const void *callout_info, 192 const void *callout_info,
204 size_t callout_len, 193 size_t callout_len,
205 struct key *dest_keyring); 194 struct key *dest_keyring);
diff --git a/security/keys/key.c b/security/keys/key.c
index 44a80d6741a1..696f1c092c50 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
265 265
266 spin_lock(&user->lock); 266 spin_lock(&user->lock);
267 if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { 267 if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
268 if (user->qnkeys + 1 >= maxkeys || 268 if (user->qnkeys + 1 > maxkeys ||
269 user->qnbytes + quotalen >= maxbytes || 269 user->qnbytes + quotalen > maxbytes ||
270 user->qnbytes + quotalen < user->qnbytes) 270 user->qnbytes + quotalen < user->qnbytes)
271 goto no_quota; 271 goto no_quota;
272 } 272 }
@@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
297 key->gid = gid; 297 key->gid = gid;
298 key->perm = perm; 298 key->perm = perm;
299 key->restrict_link = restrict_link; 299 key->restrict_link = restrict_link;
300 key->last_used_at = ktime_get_real_seconds();
300 301
301 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) 302 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
302 key->flags |= 1 << KEY_FLAG_IN_QUOTA; 303 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index e8093d025966..7bbe03593e58 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -25,6 +25,7 @@
25#include <linux/security.h> 25#include <linux/security.h>
26#include <linux/uio.h> 26#include <linux/uio.h>
27#include <linux/uaccess.h> 27#include <linux/uaccess.h>
28#include <keys/request_key_auth-type.h>
28#include "internal.h" 29#include "internal.h"
29 30
30#define KEY_MAX_DESC_SIZE 4096 31#define KEY_MAX_DESC_SIZE 4096
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index eadebb92986a..f81372f53dd7 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
661 BUG_ON((ctx->flags & STATE_CHECKS) == 0 || 661 BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
662 (ctx->flags & STATE_CHECKS) == STATE_CHECKS); 662 (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
663 663
664 if (ctx->index_key.description)
665 ctx->index_key.desc_len = strlen(ctx->index_key.description);
666
667 /* Check to see if this top-level keyring is what we are looking for 664 /* Check to see if this top-level keyring is what we are looking for
668 * and whether it is valid or not. 665 * and whether it is valid or not.
669 */ 666 */
@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
914 struct keyring_search_context ctx = { 911 struct keyring_search_context ctx = {
915 .index_key.type = type, 912 .index_key.type = type,
916 .index_key.description = description, 913 .index_key.description = description,
914 .index_key.desc_len = strlen(description),
917 .cred = current_cred(), 915 .cred = current_cred(),
918 .match_data.cmp = key_default_cmp, 916 .match_data.cmp = key_default_cmp,
919 .match_data.raw_data = description, 917 .match_data.raw_data = description,
diff --git a/security/keys/proc.c b/security/keys/proc.c
index d2b802072693..78ac305d715e 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -165,8 +165,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
165 int rc; 165 int rc;
166 166
167 struct keyring_search_context ctx = { 167 struct keyring_search_context ctx = {
168 .index_key.type = key->type, 168 .index_key = key->index_key,
169 .index_key.description = key->description,
170 .cred = m->file->f_cred, 169 .cred = m->file->f_cred,
171 .match_data.cmp = lookup_user_key_possessed, 170 .match_data.cmp = lookup_user_key_possessed,
172 .match_data.raw_data = key, 171 .match_data.raw_data = key,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 02c77e928f68..0e0b9ccad2f8 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -19,6 +19,7 @@
19#include <linux/security.h> 19#include <linux/security.h>
20#include <linux/user_namespace.h> 20#include <linux/user_namespace.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <keys/request_key_auth-type.h>
22#include "internal.h" 23#include "internal.h"
23 24
24/* Session keyring create vs join semaphore */ 25/* Session keyring create vs join semaphore */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 301f0e300dbd..7a0c6b666ff0 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -18,31 +18,30 @@
18#include <linux/keyctl.h> 18#include <linux/keyctl.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include "internal.h" 20#include "internal.h"
21#include <keys/request_key_auth-type.h>
21 22
22#define key_negative_timeout 60 /* default timeout on a negative key's existence */ 23#define key_negative_timeout 60 /* default timeout on a negative key's existence */
23 24
24/** 25/**
25 * complete_request_key - Complete the construction of a key. 26 * complete_request_key - Complete the construction of a key.
26 * @cons: The key construction record. 27 * @auth_key: The authorisation key.
27 * @error: The success or failute of the construction. 28 * @error: The success or failute of the construction.
28 * 29 *
29 * Complete the attempt to construct a key. The key will be negated 30 * Complete the attempt to construct a key. The key will be negated
30 * if an error is indicated. The authorisation key will be revoked 31 * if an error is indicated. The authorisation key will be revoked
31 * unconditionally. 32 * unconditionally.
32 */ 33 */
33void complete_request_key(struct key_construction *cons, int error) 34void complete_request_key(struct key *authkey, int error)
34{ 35{
35 kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error); 36 struct request_key_auth *rka = get_request_key_auth(authkey);
37 struct key *key = rka->target_key;
38
39 kenter("%d{%d},%d", authkey->serial, key->serial, error);
36 40
37 if (error < 0) 41 if (error < 0)
38 key_negate_and_link(cons->key, key_negative_timeout, NULL, 42 key_negate_and_link(key, key_negative_timeout, NULL, authkey);
39 cons->authkey);
40 else 43 else
41 key_revoke(cons->authkey); 44 key_revoke(authkey);
42
43 key_put(cons->key);
44 key_put(cons->authkey);
45 kfree(cons);
46} 45}
47EXPORT_SYMBOL(complete_request_key); 46EXPORT_SYMBOL(complete_request_key);
48 47
@@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
91 * Request userspace finish the construction of a key 90 * Request userspace finish the construction of a key
92 * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" 91 * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
93 */ 92 */
94static int call_sbin_request_key(struct key_construction *cons, 93static int call_sbin_request_key(struct key *authkey, void *aux)
95 const char *op,
96 void *aux)
97{ 94{
98 static char const request_key[] = "/sbin/request-key"; 95 static char const request_key[] = "/sbin/request-key";
96 struct request_key_auth *rka = get_request_key_auth(authkey);
99 const struct cred *cred = current_cred(); 97 const struct cred *cred = current_cred();
100 key_serial_t prkey, sskey; 98 key_serial_t prkey, sskey;
101 struct key *key = cons->key, *authkey = cons->authkey, *keyring, 99 struct key *key = rka->target_key, *keyring, *session;
102 *session;
103 char *argv[9], *envp[3], uid_str[12], gid_str[12]; 100 char *argv[9], *envp[3], uid_str[12], gid_str[12];
104 char key_str[12], keyring_str[3][12]; 101 char key_str[12], keyring_str[3][12];
105 char desc[20]; 102 char desc[20];
106 int ret, i; 103 int ret, i;
107 104
108 kenter("{%d},{%d},%s", key->serial, authkey->serial, op); 105 kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
109 106
110 ret = install_user_keyrings(); 107 ret = install_user_keyrings();
111 if (ret < 0) 108 if (ret < 0)
@@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons,
163 /* set up the argument list */ 160 /* set up the argument list */
164 i = 0; 161 i = 0;
165 argv[i++] = (char *)request_key; 162 argv[i++] = (char *)request_key;
166 argv[i++] = (char *) op; 163 argv[i++] = (char *)rka->op;
167 argv[i++] = key_str; 164 argv[i++] = key_str;
168 argv[i++] = uid_str; 165 argv[i++] = uid_str;
169 argv[i++] = gid_str; 166 argv[i++] = gid_str;
@@ -191,7 +188,7 @@ error_link:
191 key_put(keyring); 188 key_put(keyring);
192 189
193error_alloc: 190error_alloc:
194 complete_request_key(cons, ret); 191 complete_request_key(authkey, ret);
195 kleave(" = %d", ret); 192 kleave(" = %d", ret);
196 return ret; 193 return ret;
197} 194}
@@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info,
205 size_t callout_len, void *aux, 202 size_t callout_len, void *aux,
206 struct key *dest_keyring) 203 struct key *dest_keyring)
207{ 204{
208 struct key_construction *cons;
209 request_key_actor_t actor; 205 request_key_actor_t actor;
210 struct key *authkey; 206 struct key *authkey;
211 int ret; 207 int ret;
212 208
213 kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); 209 kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
214 210
215 cons = kmalloc(sizeof(*cons), GFP_KERNEL);
216 if (!cons)
217 return -ENOMEM;
218
219 /* allocate an authorisation key */ 211 /* allocate an authorisation key */
220 authkey = request_key_auth_new(key, callout_info, callout_len, 212 authkey = request_key_auth_new(key, "create", callout_info, callout_len,
221 dest_keyring); 213 dest_keyring);
222 if (IS_ERR(authkey)) { 214 if (IS_ERR(authkey))
223 kfree(cons); 215 return PTR_ERR(authkey);
224 ret = PTR_ERR(authkey);
225 authkey = NULL;
226 } else {
227 cons->authkey = key_get(authkey);
228 cons->key = key_get(key);
229 216
230 /* make the call */ 217 /* Make the call */
231 actor = call_sbin_request_key; 218 actor = call_sbin_request_key;
232 if (key->type->request_key) 219 if (key->type->request_key)
233 actor = key->type->request_key; 220 actor = key->type->request_key;
234 221
235 ret = actor(cons, "create", aux); 222 ret = actor(authkey, aux);
236 223
237 /* check that the actor called complete_request_key() prior to 224 /* check that the actor called complete_request_key() prior to
238 * returning an error */ 225 * returning an error */
239 WARN_ON(ret < 0 && 226 WARN_ON(ret < 0 &&
240 !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); 227 !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
241 key_put(authkey);
242 }
243 228
229 key_put(authkey);
244 kleave(" = %d", ret); 230 kleave(" = %d", ret);
245 return ret; 231 return ret;
246} 232}
@@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
275 if (cred->request_key_auth) { 261 if (cred->request_key_auth) {
276 authkey = cred->request_key_auth; 262 authkey = cred->request_key_auth;
277 down_read(&authkey->sem); 263 down_read(&authkey->sem);
278 rka = authkey->payload.data[0]; 264 rka = get_request_key_auth(authkey);
279 if (!test_bit(KEY_FLAG_REVOKED, 265 if (!test_bit(KEY_FLAG_REVOKED,
280 &authkey->flags)) 266 &authkey->flags))
281 dest_keyring = 267 dest_keyring =
@@ -545,6 +531,7 @@ struct key *request_key_and_link(struct key_type *type,
545 struct keyring_search_context ctx = { 531 struct keyring_search_context ctx = {
546 .index_key.type = type, 532 .index_key.type = type,
547 .index_key.description = description, 533 .index_key.description = description,
534 .index_key.desc_len = strlen(description),
548 .cred = current_cred(), 535 .cred = current_cred(),
549 .match_data.cmp = key_default_cmp, 536 .match_data.cmp = key_default_cmp,
550 .match_data.raw_data = description, 537 .match_data.raw_data = description,
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 87ea2f54dedc..bda6201c6c45 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -17,7 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include "internal.h" 19#include "internal.h"
20#include <keys/user-type.h> 20#include <keys/request_key_auth-type.h>
21 21
22static int request_key_auth_preparse(struct key_preparsed_payload *); 22static int request_key_auth_preparse(struct key_preparsed_payload *);
23static void request_key_auth_free_preparse(struct key_preparsed_payload *); 23static void request_key_auth_free_preparse(struct key_preparsed_payload *);
@@ -68,7 +68,7 @@ static int request_key_auth_instantiate(struct key *key,
68static void request_key_auth_describe(const struct key *key, 68static void request_key_auth_describe(const struct key *key,
69 struct seq_file *m) 69 struct seq_file *m)
70{ 70{
71 struct request_key_auth *rka = key->payload.data[0]; 71 struct request_key_auth *rka = get_request_key_auth(key);
72 72
73 seq_puts(m, "key:"); 73 seq_puts(m, "key:");
74 seq_puts(m, key->description); 74 seq_puts(m, key->description);
@@ -83,7 +83,7 @@ static void request_key_auth_describe(const struct key *key,
83static long request_key_auth_read(const struct key *key, 83static long request_key_auth_read(const struct key *key,
84 char __user *buffer, size_t buflen) 84 char __user *buffer, size_t buflen)
85{ 85{
86 struct request_key_auth *rka = key->payload.data[0]; 86 struct request_key_auth *rka = get_request_key_auth(key);
87 size_t datalen; 87 size_t datalen;
88 long ret; 88 long ret;
89 89
@@ -109,7 +109,7 @@ static long request_key_auth_read(const struct key *key,
109 */ 109 */
110static void request_key_auth_revoke(struct key *key) 110static void request_key_auth_revoke(struct key *key)
111{ 111{
112 struct request_key_auth *rka = key->payload.data[0]; 112 struct request_key_auth *rka = get_request_key_auth(key);
113 113
114 kenter("{%d}", key->serial); 114 kenter("{%d}", key->serial);
115 115
@@ -136,7 +136,7 @@ static void free_request_key_auth(struct request_key_auth *rka)
136 */ 136 */
137static void request_key_auth_destroy(struct key *key) 137static void request_key_auth_destroy(struct key *key)
138{ 138{
139 struct request_key_auth *rka = key->payload.data[0]; 139 struct request_key_auth *rka = get_request_key_auth(key);
140 140
141 kenter("{%d}", key->serial); 141 kenter("{%d}", key->serial);
142 142
@@ -147,8 +147,9 @@ static void request_key_auth_destroy(struct key *key)
147 * Create an authorisation token for /sbin/request-key or whoever to gain 147 * Create an authorisation token for /sbin/request-key or whoever to gain
148 * access to the caller's security data. 148 * access to the caller's security data.
149 */ 149 */
150struct key *request_key_auth_new(struct key *target, const void *callout_info, 150struct key *request_key_auth_new(struct key *target, const char *op,
151 size_t callout_len, struct key *dest_keyring) 151 const void *callout_info, size_t callout_len,
152 struct key *dest_keyring)
152{ 153{
153 struct request_key_auth *rka, *irka; 154 struct request_key_auth *rka, *irka;
154 const struct cred *cred = current->cred; 155 const struct cred *cred = current->cred;
@@ -166,6 +167,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
166 if (!rka->callout_info) 167 if (!rka->callout_info)
167 goto error_free_rka; 168 goto error_free_rka;
168 rka->callout_len = callout_len; 169 rka->callout_len = callout_len;
170 strlcpy(rka->op, op, sizeof(rka->op));
169 171
170 /* see if the calling process is already servicing the key request of 172 /* see if the calling process is already servicing the key request of
171 * another process */ 173 * another process */
@@ -245,7 +247,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
245 struct key *authkey; 247 struct key *authkey;
246 key_ref_t authkey_ref; 248 key_ref_t authkey_ref;
247 249
248 sprintf(description, "%x", target_id); 250 ctx.index_key.desc_len = sprintf(description, "%x", target_id);
249 251
250 authkey_ref = search_process_keyrings(&ctx); 252 authkey_ref = search_process_keyrings(&ctx);
251 253
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index f84001019356..33028c098ef3 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
321 if (a->u.net->sk) { 321 if (a->u.net->sk) {
322 struct sock *sk = a->u.net->sk; 322 struct sock *sk = a->u.net->sk;
323 struct unix_sock *u; 323 struct unix_sock *u;
324 struct unix_address *addr;
324 int len = 0; 325 int len = 0;
325 char *p = NULL; 326 char *p = NULL;
326 327
@@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
351#endif 352#endif
352 case AF_UNIX: 353 case AF_UNIX:
353 u = unix_sk(sk); 354 u = unix_sk(sk);
355 addr = smp_load_acquire(&u->addr);
356 if (!addr)
357 break;
354 if (u->path.dentry) { 358 if (u->path.dentry) {
355 audit_log_d_path(ab, " path=", &u->path); 359 audit_log_d_path(ab, " path=", &u->path);
356 break; 360 break;
357 } 361 }
358 if (!u->addr) 362 len = addr->len-sizeof(short);
359 break; 363 p = &addr->name->sun_path[0];
360 len = u->addr->len-sizeof(short);
361 p = &u->addr->name->sun_path[0];
362 audit_log_format(ab, " path="); 364 audit_log_format(ab, " path=");
363 if (*p) 365 if (*p)
364 audit_log_untrustedstring(ab, p); 366 audit_log_untrustedstring(ab, p);
diff --git a/security/security.c b/security/security.c
index f1b8d2587639..55bc49027ba9 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1027,6 +1027,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
1027 1027
1028void security_cred_free(struct cred *cred) 1028void security_cred_free(struct cred *cred)
1029{ 1029{
1030 /*
1031 * There is a failure case in prepare_creds() that
1032 * may result in a call here with ->security being NULL.
1033 */
1034 if (unlikely(cred->security == NULL))
1035 return;
1036
1030 call_void_hook(cred_free, cred); 1037 call_void_hook(cred_free, cred);
1031} 1038}
1032 1039
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index a50d625e7946..c1c31e33657a 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
732 kfree(key); 732 kfree(key);
733 if (datum) { 733 if (datum) {
734 levdatum = datum; 734 levdatum = datum;
735 ebitmap_destroy(&levdatum->level->cat); 735 if (levdatum->level)
736 ebitmap_destroy(&levdatum->level->cat);
736 kfree(levdatum->level); 737 kfree(levdatum->level);
737 } 738 }
738 kfree(datum); 739 kfree(datum);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index ffda91a4a1aa..02514fe558b4 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
368 break; 368 break;
369 case YAMA_SCOPE_RELATIONAL: 369 case YAMA_SCOPE_RELATIONAL:
370 rcu_read_lock(); 370 rcu_read_lock();
371 if (!task_is_descendant(current, child) && 371 if (!pid_alive(child))
372 rc = -EPERM;
373 if (!rc && !task_is_descendant(current, child) &&
372 !ptracer_exception_found(current, child) && 374 !ptracer_exception_found(current, child) &&
373 !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) 375 !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
374 rc = -EPERM; 376 rc = -EPERM;
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index c3f57a3fb1a5..40ebde2e1ab1 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -47,8 +47,8 @@ static int alloc_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev,
47 /* We use the PCI APIs for now until the generic one gets fixed 47 /* We use the PCI APIs for now until the generic one gets fixed
48 * enough or until we get some macio-specific versions 48 * enough or until we get some macio-specific versions
49 */ 49 */
50 r->space = dma_zalloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, 50 r->space = dma_alloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev,
51 r->size, &r->bus_addr, GFP_KERNEL); 51 r->size, &r->bus_addr, GFP_KERNEL);
52 if (!r->space) 52 if (!r->space)
53 return -ENOMEM; 53 return -ENOMEM;
54 54
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index a5b09e75e787..f7d2b373da0a 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -541,7 +541,8 @@ static int snd_compress_check_input(struct snd_compr_params *params)
541{ 541{
542 /* first let's check the buffer parameter's */ 542 /* first let's check the buffer parameter's */
543 if (params->buffer.fragment_size == 0 || 543 if (params->buffer.fragment_size == 0 ||
544 params->buffer.fragments > INT_MAX / params->buffer.fragment_size) 544 params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
545 params->buffer.fragments == 0)
545 return -EINVAL; 546 return -EINVAL;
546 547
547 /* now codec parameters */ 548 /* now codec parameters */
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 40013b26f671..6c0b30391ba9 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2177,16 +2177,11 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2177 snd_pcm_update_hw_ptr(substream); 2177 snd_pcm_update_hw_ptr(substream);
2178 2178
2179 if (!is_playback && 2179 if (!is_playback &&
2180 runtime->status->state == SNDRV_PCM_STATE_PREPARED) { 2180 runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2181 if (size >= runtime->start_threshold) { 2181 size >= runtime->start_threshold) {
2182 err = snd_pcm_start(substream); 2182 err = snd_pcm_start(substream);
2183 if (err < 0) 2183 if (err < 0)
2184 goto _end_unlock;
2185 } else {
2186 /* nothing to do */
2187 err = 0;
2188 goto _end_unlock; 2184 goto _end_unlock;
2189 }
2190 } 2185 }
2191 2186
2192 avail = snd_pcm_avail(substream); 2187 avail = snd_pcm_avail(substream);
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 598d140bb7cb..5fc497c6d738 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -903,6 +903,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
903 struct dsp_spos_instance * ins = chip->dsp_spos_instance; 903 struct dsp_spos_instance * ins = chip->dsp_spos_instance;
904 int i; 904 int i;
905 905
906 if (!ins)
907 return 0;
908
906 snd_info_free_entry(ins->proc_sym_info_entry); 909 snd_info_free_entry(ins->proc_sym_info_entry);
907 ins->proc_sym_info_entry = NULL; 910 ins->proc_sym_info_entry = NULL;
908 911
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 9174f1b3a987..1ec706ced75c 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -115,7 +115,8 @@ static int hda_codec_driver_probe(struct device *dev)
115 err = snd_hda_codec_build_controls(codec); 115 err = snd_hda_codec_build_controls(codec);
116 if (err < 0) 116 if (err < 0)
117 goto error_module; 117 goto error_module;
118 if (codec->card->registered) { 118 /* only register after the bus probe finished; otherwise it's racy */
119 if (!codec->bus->bus_probing && codec->card->registered) {
119 err = snd_card_register(codec->card); 120 err = snd_card_register(codec->card);
120 if (err < 0) 121 if (err < 0)
121 goto error_module; 122 goto error_module;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e784130ea4e0..e5c49003e75f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2185,6 +2185,7 @@ static int azx_probe_continue(struct azx *chip)
2185 int dev = chip->dev_index; 2185 int dev = chip->dev_index;
2186 int err; 2186 int err;
2187 2187
2188 to_hda_bus(bus)->bus_probing = 1;
2188 hda->probe_continued = 1; 2189 hda->probe_continued = 1;
2189 2190
2190 /* bind with i915 if needed */ 2191 /* bind with i915 if needed */
@@ -2269,6 +2270,7 @@ out_free:
2269 if (err < 0) 2270 if (err < 0)
2270 hda->init_failed = 1; 2271 hda->init_failed = 1;
2271 complete_all(&hda->probe_wait); 2272 complete_all(&hda->probe_wait);
2273 to_hda_bus(bus)->bus_probing = 0;
2272 return err; 2274 return err;
2273} 2275}
2274 2276
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index e5bdbc245682..29882bda7632 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -8451,8 +8451,10 @@ static void ca0132_free(struct hda_codec *codec)
8451 ca0132_exit_chip(codec); 8451 ca0132_exit_chip(codec);
8452 8452
8453 snd_hda_power_down(codec); 8453 snd_hda_power_down(codec);
8454 if (IS_ENABLED(CONFIG_PCI) && spec->mem_base) 8454#ifdef CONFIG_PCI
8455 if (spec->mem_base)
8455 pci_iounmap(codec->bus->pci, spec->mem_base); 8456 pci_iounmap(codec->bus->pci, spec->mem_base);
8457#endif
8456 kfree(spec->spec_init_verbs); 8458 kfree(spec->spec_init_verbs);
8457 kfree(codec->spec); 8459 kfree(codec->spec);
8458} 8460}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 51cc6589443f..a4ee7656d9ee 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
924 SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), 924 SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
925 SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), 925 SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
926 SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), 926 SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
927 SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
927 SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), 928 SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
928 SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), 929 SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
929 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), 930 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
@@ -931,6 +932,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
931 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), 932 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
932 SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), 933 SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
933 SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), 934 SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
935 SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
934 SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), 936 SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
935 SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), 937 SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
936 SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), 938 SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index aee4cbd29d53..1ffa36e987b4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -117,6 +117,7 @@ struct alc_spec {
117 int codec_variant; /* flag for other variants */ 117 int codec_variant; /* flag for other variants */
118 unsigned int has_alc5505_dsp:1; 118 unsigned int has_alc5505_dsp:1;
119 unsigned int no_depop_delay:1; 119 unsigned int no_depop_delay:1;
120 unsigned int done_hp_init:1;
120 121
121 /* for PLL fix */ 122 /* for PLL fix */
122 hda_nid_t pll_nid; 123 hda_nid_t pll_nid;
@@ -514,6 +515,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
514 } 515 }
515} 516}
516 517
518/* get a primary headphone pin if available */
519static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
520{
521 if (spec->gen.autocfg.hp_pins[0])
522 return spec->gen.autocfg.hp_pins[0];
523 if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
524 return spec->gen.autocfg.line_out_pins[0];
525 return 0;
526}
517 527
518/* 528/*
519 * Realtek SSID verification 529 * Realtek SSID verification
@@ -724,9 +734,7 @@ do_sku:
724 * 15 : 1 --> enable the function "Mute internal speaker 734 * 15 : 1 --> enable the function "Mute internal speaker
725 * when the external headphone out jack is plugged" 735 * when the external headphone out jack is plugged"
726 */ 736 */
727 if (!spec->gen.autocfg.hp_pins[0] && 737 if (!alc_get_hp_pin(spec)) {
728 !(spec->gen.autocfg.line_out_pins[0] &&
729 spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
730 hda_nid_t nid; 738 hda_nid_t nid;
731 tmp = (ass >> 11) & 0x3; /* HP to chassis */ 739 tmp = (ass >> 11) & 0x3; /* HP to chassis */
732 nid = ports[tmp]; 740 nid = ports[tmp];
@@ -1847,6 +1855,8 @@ enum {
1847 ALC887_FIXUP_BASS_CHMAP, 1855 ALC887_FIXUP_BASS_CHMAP,
1848 ALC1220_FIXUP_GB_DUAL_CODECS, 1856 ALC1220_FIXUP_GB_DUAL_CODECS,
1849 ALC1220_FIXUP_CLEVO_P950, 1857 ALC1220_FIXUP_CLEVO_P950,
1858 ALC1220_FIXUP_SYSTEM76_ORYP5,
1859 ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
1850}; 1860};
1851 1861
1852static void alc889_fixup_coef(struct hda_codec *codec, 1862static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2048,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
2048 snd_hda_override_conn_list(codec, 0x1b, 1, conn1); 2058 snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
2049} 2059}
2050 2060
2061static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
2062 const struct hda_fixup *fix, int action);
2063
2064static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
2065 const struct hda_fixup *fix,
2066 int action)
2067{
2068 alc1220_fixup_clevo_p950(codec, fix, action);
2069 alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
2070}
2071
2051static const struct hda_fixup alc882_fixups[] = { 2072static const struct hda_fixup alc882_fixups[] = {
2052 [ALC882_FIXUP_ABIT_AW9D_MAX] = { 2073 [ALC882_FIXUP_ABIT_AW9D_MAX] = {
2053 .type = HDA_FIXUP_PINS, 2074 .type = HDA_FIXUP_PINS,
@@ -2292,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
2292 .type = HDA_FIXUP_FUNC, 2313 .type = HDA_FIXUP_FUNC,
2293 .v.func = alc1220_fixup_clevo_p950, 2314 .v.func = alc1220_fixup_clevo_p950,
2294 }, 2315 },
2316 [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
2317 .type = HDA_FIXUP_FUNC,
2318 .v.func = alc1220_fixup_system76_oryp5,
2319 },
2320 [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
2321 .type = HDA_FIXUP_PINS,
2322 .v.pins = (const struct hda_pintbl[]) {
2323 { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
2324 {}
2325 },
2326 .chained = true,
2327 .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
2328 },
2295}; 2329};
2296 2330
2297static const struct snd_pci_quirk alc882_fixup_tbl[] = { 2331static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2368,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2368 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), 2402 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
2369 SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), 2403 SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
2370 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), 2404 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
2405 SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
2406 SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
2371 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), 2407 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
2372 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), 2408 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
2373 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), 2409 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -2958,7 +2994,7 @@ static void alc282_restore_default_value(struct hda_codec *codec)
2958static void alc282_init(struct hda_codec *codec) 2994static void alc282_init(struct hda_codec *codec)
2959{ 2995{
2960 struct alc_spec *spec = codec->spec; 2996 struct alc_spec *spec = codec->spec;
2961 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 2997 hda_nid_t hp_pin = alc_get_hp_pin(spec);
2962 bool hp_pin_sense; 2998 bool hp_pin_sense;
2963 int coef78; 2999 int coef78;
2964 3000
@@ -2995,7 +3031,7 @@ static void alc282_init(struct hda_codec *codec)
2995static void alc282_shutup(struct hda_codec *codec) 3031static void alc282_shutup(struct hda_codec *codec)
2996{ 3032{
2997 struct alc_spec *spec = codec->spec; 3033 struct alc_spec *spec = codec->spec;
2998 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3034 hda_nid_t hp_pin = alc_get_hp_pin(spec);
2999 bool hp_pin_sense; 3035 bool hp_pin_sense;
3000 int coef78; 3036 int coef78;
3001 3037
@@ -3073,14 +3109,9 @@ static void alc283_restore_default_value(struct hda_codec *codec)
3073static void alc283_init(struct hda_codec *codec) 3109static void alc283_init(struct hda_codec *codec)
3074{ 3110{
3075 struct alc_spec *spec = codec->spec; 3111 struct alc_spec *spec = codec->spec;
3076 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3112 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3077 bool hp_pin_sense; 3113 bool hp_pin_sense;
3078 3114
3079 if (!spec->gen.autocfg.hp_outs) {
3080 if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
3081 hp_pin = spec->gen.autocfg.line_out_pins[0];
3082 }
3083
3084 alc283_restore_default_value(codec); 3115 alc283_restore_default_value(codec);
3085 3116
3086 if (!hp_pin) 3117 if (!hp_pin)
@@ -3114,14 +3145,9 @@ static void alc283_init(struct hda_codec *codec)
3114static void alc283_shutup(struct hda_codec *codec) 3145static void alc283_shutup(struct hda_codec *codec)
3115{ 3146{
3116 struct alc_spec *spec = codec->spec; 3147 struct alc_spec *spec = codec->spec;
3117 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3148 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3118 bool hp_pin_sense; 3149 bool hp_pin_sense;
3119 3150
3120 if (!spec->gen.autocfg.hp_outs) {
3121 if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
3122 hp_pin = spec->gen.autocfg.line_out_pins[0];
3123 }
3124
3125 if (!hp_pin) { 3151 if (!hp_pin) {
3126 alc269_shutup(codec); 3152 alc269_shutup(codec);
3127 return; 3153 return;
@@ -3155,7 +3181,7 @@ static void alc283_shutup(struct hda_codec *codec)
3155static void alc256_init(struct hda_codec *codec) 3181static void alc256_init(struct hda_codec *codec)
3156{ 3182{
3157 struct alc_spec *spec = codec->spec; 3183 struct alc_spec *spec = codec->spec;
3158 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3184 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3159 bool hp_pin_sense; 3185 bool hp_pin_sense;
3160 3186
3161 if (!hp_pin) 3187 if (!hp_pin)
@@ -3191,7 +3217,7 @@ static void alc256_init(struct hda_codec *codec)
3191static void alc256_shutup(struct hda_codec *codec) 3217static void alc256_shutup(struct hda_codec *codec)
3192{ 3218{
3193 struct alc_spec *spec = codec->spec; 3219 struct alc_spec *spec = codec->spec;
3194 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3220 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3195 bool hp_pin_sense; 3221 bool hp_pin_sense;
3196 3222
3197 if (!hp_pin) { 3223 if (!hp_pin) {
@@ -3227,7 +3253,7 @@ static void alc256_shutup(struct hda_codec *codec)
3227static void alc225_init(struct hda_codec *codec) 3253static void alc225_init(struct hda_codec *codec)
3228{ 3254{
3229 struct alc_spec *spec = codec->spec; 3255 struct alc_spec *spec = codec->spec;
3230 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3256 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3231 bool hp1_pin_sense, hp2_pin_sense; 3257 bool hp1_pin_sense, hp2_pin_sense;
3232 3258
3233 if (!hp_pin) 3259 if (!hp_pin)
@@ -3270,7 +3296,7 @@ static void alc225_init(struct hda_codec *codec)
3270static void alc225_shutup(struct hda_codec *codec) 3296static void alc225_shutup(struct hda_codec *codec)
3271{ 3297{
3272 struct alc_spec *spec = codec->spec; 3298 struct alc_spec *spec = codec->spec;
3273 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3299 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3274 bool hp1_pin_sense, hp2_pin_sense; 3300 bool hp1_pin_sense, hp2_pin_sense;
3275 3301
3276 if (!hp_pin) { 3302 if (!hp_pin) {
@@ -3314,7 +3340,7 @@ static void alc225_shutup(struct hda_codec *codec)
3314static void alc_default_init(struct hda_codec *codec) 3340static void alc_default_init(struct hda_codec *codec)
3315{ 3341{
3316 struct alc_spec *spec = codec->spec; 3342 struct alc_spec *spec = codec->spec;
3317 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3343 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3318 bool hp_pin_sense; 3344 bool hp_pin_sense;
3319 3345
3320 if (!hp_pin) 3346 if (!hp_pin)
@@ -3343,7 +3369,7 @@ static void alc_default_init(struct hda_codec *codec)
3343static void alc_default_shutup(struct hda_codec *codec) 3369static void alc_default_shutup(struct hda_codec *codec)
3344{ 3370{
3345 struct alc_spec *spec = codec->spec; 3371 struct alc_spec *spec = codec->spec;
3346 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3372 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3347 bool hp_pin_sense; 3373 bool hp_pin_sense;
3348 3374
3349 if (!hp_pin) { 3375 if (!hp_pin) {
@@ -3372,6 +3398,48 @@ static void alc_default_shutup(struct hda_codec *codec)
3372 snd_hda_shutup_pins(codec); 3398 snd_hda_shutup_pins(codec);
3373} 3399}
3374 3400
3401static void alc294_hp_init(struct hda_codec *codec)
3402{
3403 struct alc_spec *spec = codec->spec;
3404 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3405 int i, val;
3406
3407 if (!hp_pin)
3408 return;
3409
3410 snd_hda_codec_write(codec, hp_pin, 0,
3411 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
3412
3413 msleep(100);
3414
3415 snd_hda_codec_write(codec, hp_pin, 0,
3416 AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
3417
3418 alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
3419 alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
3420
3421 /* Wait for depop procedure finish */
3422 val = alc_read_coefex_idx(codec, 0x58, 0x01);
3423 for (i = 0; i < 20 && val & 0x0080; i++) {
3424 msleep(50);
3425 val = alc_read_coefex_idx(codec, 0x58, 0x01);
3426 }
3427 /* Set HP depop to auto mode */
3428 alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
3429 msleep(50);
3430}
3431
3432static void alc294_init(struct hda_codec *codec)
3433{
3434 struct alc_spec *spec = codec->spec;
3435
3436 if (!spec->done_hp_init) {
3437 alc294_hp_init(codec);
3438 spec->done_hp_init = true;
3439 }
3440 alc_default_init(codec);
3441}
3442
3375static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg, 3443static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
3376 unsigned int val) 3444 unsigned int val)
3377{ 3445{
@@ -4102,6 +4170,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
4102 case 0x10ec0295: 4170 case 0x10ec0295:
4103 case 0x10ec0289: 4171 case 0x10ec0289:
4104 case 0x10ec0299: 4172 case 0x10ec0299:
4173 alc_process_coef_fw(codec, alc225_pre_hsmode);
4105 alc_process_coef_fw(codec, coef0225); 4174 alc_process_coef_fw(codec, coef0225);
4106 break; 4175 break;
4107 case 0x10ec0867: 4176 case 0x10ec0867:
@@ -4736,7 +4805,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
4736 struct alc_spec *spec = codec->spec; 4805 struct alc_spec *spec = codec->spec;
4737 4806
4738 hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]]; 4807 hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
4739 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 4808 hda_nid_t hp_pin = alc_get_hp_pin(spec);
4740 4809
4741 int new_headset_mode; 4810 int new_headset_mode;
4742 4811
@@ -5015,7 +5084,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
5015static void alc_shutup_dell_xps13(struct hda_codec *codec) 5084static void alc_shutup_dell_xps13(struct hda_codec *codec)
5016{ 5085{
5017 struct alc_spec *spec = codec->spec; 5086 struct alc_spec *spec = codec->spec;
5018 int hp_pin = spec->gen.autocfg.hp_pins[0]; 5087 int hp_pin = alc_get_hp_pin(spec);
5019 5088
5020 /* Prevent pop noises when headphones are plugged in */ 5089 /* Prevent pop noises when headphones are plugged in */
5021 snd_hda_codec_write(codec, hp_pin, 0, 5090 snd_hda_codec_write(codec, hp_pin, 0,
@@ -5108,7 +5177,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
5108 5177
5109 if (action == HDA_FIXUP_ACT_PROBE) { 5178 if (action == HDA_FIXUP_ACT_PROBE) {
5110 int mic_pin = find_ext_mic_pin(codec); 5179 int mic_pin = find_ext_mic_pin(codec);
5111 int hp_pin = spec->gen.autocfg.hp_pins[0]; 5180 int hp_pin = alc_get_hp_pin(spec);
5112 5181
5113 if (snd_BUG_ON(!mic_pin || !hp_pin)) 5182 if (snd_BUG_ON(!mic_pin || !hp_pin))
5114 return; 5183 return;
@@ -5440,6 +5509,13 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
5440 } 5509 }
5441} 5510}
5442 5511
5512static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
5513 const struct hda_fixup *fix, int action)
5514{
5515 if (action == HDA_FIXUP_ACT_PRE_PROBE)
5516 snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
5517}
5518
5443/* for hda_fixup_thinkpad_acpi() */ 5519/* for hda_fixup_thinkpad_acpi() */
5444#include "thinkpad_helper.c" 5520#include "thinkpad_helper.c"
5445 5521
@@ -5549,6 +5625,7 @@ enum {
5549 ALC293_FIXUP_LENOVO_SPK_NOISE, 5625 ALC293_FIXUP_LENOVO_SPK_NOISE,
5550 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, 5626 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
5551 ALC255_FIXUP_DELL_SPK_NOISE, 5627 ALC255_FIXUP_DELL_SPK_NOISE,
5628 ALC225_FIXUP_DISABLE_MIC_VREF,
5552 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, 5629 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5553 ALC295_FIXUP_DISABLE_DAC3, 5630 ALC295_FIXUP_DISABLE_DAC3,
5554 ALC280_FIXUP_HP_HEADSET_MIC, 5631 ALC280_FIXUP_HP_HEADSET_MIC,
@@ -5582,6 +5659,8 @@ enum {
5582 ALC294_FIXUP_ASUS_HEADSET_MIC, 5659 ALC294_FIXUP_ASUS_HEADSET_MIC,
5583 ALC294_FIXUP_ASUS_SPK, 5660 ALC294_FIXUP_ASUS_SPK,
5584 ALC225_FIXUP_HEADSET_JACK, 5661 ALC225_FIXUP_HEADSET_JACK,
5662 ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
5663 ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
5585}; 5664};
5586 5665
5587static const struct hda_fixup alc269_fixups[] = { 5666static const struct hda_fixup alc269_fixups[] = {
@@ -6268,6 +6347,12 @@ static const struct hda_fixup alc269_fixups[] = {
6268 .chained = true, 6347 .chained = true,
6269 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE 6348 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
6270 }, 6349 },
6350 [ALC225_FIXUP_DISABLE_MIC_VREF] = {
6351 .type = HDA_FIXUP_FUNC,
6352 .v.func = alc_fixup_disable_mic_vref,
6353 .chained = true,
6354 .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
6355 },
6271 [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = { 6356 [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
6272 .type = HDA_FIXUP_VERBS, 6357 .type = HDA_FIXUP_VERBS,
6273 .v.verbs = (const struct hda_verb[]) { 6358 .v.verbs = (const struct hda_verb[]) {
@@ -6277,7 +6362,7 @@ static const struct hda_fixup alc269_fixups[] = {
6277 {} 6362 {}
6278 }, 6363 },
6279 .chained = true, 6364 .chained = true,
6280 .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE 6365 .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
6281 }, 6366 },
6282 [ALC280_FIXUP_HP_HEADSET_MIC] = { 6367 [ALC280_FIXUP_HP_HEADSET_MIC] = {
6283 .type = HDA_FIXUP_FUNC, 6368 .type = HDA_FIXUP_FUNC,
@@ -6522,6 +6607,26 @@ static const struct hda_fixup alc269_fixups[] = {
6522 .type = HDA_FIXUP_FUNC, 6607 .type = HDA_FIXUP_FUNC,
6523 .v.func = alc_fixup_headset_jack, 6608 .v.func = alc_fixup_headset_jack,
6524 }, 6609 },
6610 [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
6611 .type = HDA_FIXUP_PINS,
6612 .v.pins = (const struct hda_pintbl[]) {
6613 { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
6614 { }
6615 },
6616 .chained = true,
6617 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6618 },
6619 [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
6620 .type = HDA_FIXUP_VERBS,
6621 .v.verbs = (const struct hda_verb[]) {
6622 /* Disable PCBEEP-IN passthrough */
6623 { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
6624 { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
6625 { }
6626 },
6627 .chained = true,
6628 .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
6629 },
6525}; 6630};
6526 6631
6527static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6632static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6584,6 +6689,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6584 SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), 6689 SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
6585 SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), 6690 SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
6586 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), 6691 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
6692 SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6587 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6693 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6588 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6694 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6589 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 6695 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6699,6 +6805,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6699 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), 6805 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
6700 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), 6806 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
6701 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), 6807 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
6808 SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
6702 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), 6809 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
6703 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 6810 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
6704 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), 6811 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -6910,7 +7017,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
6910 {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, 7017 {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
6911 {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, 7018 {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
6912 {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, 7019 {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
6913 {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"}, 7020 {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
6914 {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, 7021 {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
6915 {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, 7022 {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
6916 {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, 7023 {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
@@ -7205,7 +7312,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7205 {0x12, 0x90a60130}, 7312 {0x12, 0x90a60130},
7206 {0x19, 0x03a11020}, 7313 {0x19, 0x03a11020},
7207 {0x21, 0x0321101f}), 7314 {0x21, 0x0321101f}),
7208 SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, 7315 SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
7209 {0x12, 0x90a60130}, 7316 {0x12, 0x90a60130},
7210 {0x14, 0x90170110}, 7317 {0x14, 0x90170110},
7211 {0x19, 0x04a11040}, 7318 {0x19, 0x04a11040},
@@ -7357,37 +7464,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
7357 alc_update_coef_idx(codec, 0x4, 0, 1<<11); 7464 alc_update_coef_idx(codec, 0x4, 0, 1<<11);
7358} 7465}
7359 7466
7360static void alc294_hp_init(struct hda_codec *codec)
7361{
7362 struct alc_spec *spec = codec->spec;
7363 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
7364 int i, val;
7365
7366 if (!hp_pin)
7367 return;
7368
7369 snd_hda_codec_write(codec, hp_pin, 0,
7370 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
7371
7372 msleep(100);
7373
7374 snd_hda_codec_write(codec, hp_pin, 0,
7375 AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
7376
7377 alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
7378 alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
7379
7380 /* Wait for depop procedure finish */
7381 val = alc_read_coefex_idx(codec, 0x58, 0x01);
7382 for (i = 0; i < 20 && val & 0x0080; i++) {
7383 msleep(50);
7384 val = alc_read_coefex_idx(codec, 0x58, 0x01);
7385 }
7386 /* Set HP depop to auto mode */
7387 alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
7388 msleep(50);
7389}
7390
7391/* 7467/*
7392 */ 7468 */
7393static int patch_alc269(struct hda_codec *codec) 7469static int patch_alc269(struct hda_codec *codec)
@@ -7513,7 +7589,7 @@ static int patch_alc269(struct hda_codec *codec)
7513 spec->codec_variant = ALC269_TYPE_ALC294; 7589 spec->codec_variant = ALC269_TYPE_ALC294;
7514 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ 7590 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
7515 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ 7591 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
7516 alc294_hp_init(codec); 7592 spec->init_hook = alc294_init;
7517 break; 7593 break;
7518 case 0x10ec0300: 7594 case 0x10ec0300:
7519 spec->codec_variant = ALC269_TYPE_ALC300; 7595 spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7525,7 +7601,7 @@ static int patch_alc269(struct hda_codec *codec)
7525 spec->codec_variant = ALC269_TYPE_ALC700; 7601 spec->codec_variant = ALC269_TYPE_ALC700;
7526 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ 7602 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
7527 alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ 7603 alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
7528 alc294_hp_init(codec); 7604 spec->init_hook = alc294_init;
7529 break; 7605 break;
7530 7606
7531 } 7607 }
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index 022a8912c8a2..3d58338fa3cf 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -611,14 +611,16 @@ static int acp3x_audio_probe(struct platform_device *pdev)
611 } 611 }
612 irqflags = *((unsigned int *)(pdev->dev.platform_data)); 612 irqflags = *((unsigned int *)(pdev->dev.platform_data));
613 613
614 adata = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dev_data),
615 GFP_KERNEL);
616 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 614 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
617 if (!res) { 615 if (!res) {
618 dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n"); 616 dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n");
619 return -ENODEV; 617 return -ENODEV;
620 } 618 }
621 619
620 adata = devm_kzalloc(&pdev->dev, sizeof(*adata), GFP_KERNEL);
621 if (!adata)
622 return -ENOMEM;
623
622 adata->acp3x_base = devm_ioremap(&pdev->dev, res->start, 624 adata->acp3x_base = devm_ioremap(&pdev->dev, res->start,
623 resource_size(res)); 625 resource_size(res));
624 626
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 3ab2949c1dfa..b19d7a3e7a2c 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1890,51 +1890,31 @@ static void hdmi_codec_remove(struct snd_soc_component *component)
1890 pm_runtime_disable(&hdev->dev); 1890 pm_runtime_disable(&hdev->dev);
1891} 1891}
1892 1892
1893#ifdef CONFIG_PM 1893#ifdef CONFIG_PM_SLEEP
1894static int hdmi_codec_prepare(struct device *dev) 1894static int hdmi_codec_resume(struct device *dev)
1895{
1896 struct hdac_device *hdev = dev_to_hdac_dev(dev);
1897
1898 pm_runtime_get_sync(&hdev->dev);
1899
1900 /*
1901 * Power down afg.
1902 * codec_read is preferred over codec_write to set the power state.
1903 * This way verb is send to set the power state and response
1904 * is received. So setting power state is ensured without using loop
1905 * to read the state.
1906 */
1907 snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
1908 AC_PWRST_D3);
1909
1910 return 0;
1911}
1912
1913static void hdmi_codec_complete(struct device *dev)
1914{ 1895{
1915 struct hdac_device *hdev = dev_to_hdac_dev(dev); 1896 struct hdac_device *hdev = dev_to_hdac_dev(dev);
1916 struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev); 1897 struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
1898 int ret;
1917 1899
1918 /* Power up afg */ 1900 ret = pm_runtime_force_resume(dev);
1919 snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE, 1901 if (ret < 0)
1920 AC_PWRST_D0); 1902 return ret;
1921
1922 hdac_hdmi_skl_enable_all_pins(hdev);
1923 hdac_hdmi_skl_enable_dp12(hdev);
1924
1925 /* 1903 /*
1926 * As the ELD notify callback request is not entertained while the 1904 * As the ELD notify callback request is not entertained while the
1927 * device is in suspend state. Need to manually check detection of 1905 * device is in suspend state. Need to manually check detection of
1928 * all pins here. pin capablity change is not support, so use the 1906 * all pins here. pin capablity change is not support, so use the
1929 * already set pin caps. 1907 * already set pin caps.
1908 *
1909 * NOTE: this is safe to call even if the codec doesn't actually resume.
1910 * The pin check involves only with DRM audio component hooks, so it
1911 * works even if the HD-audio side is still dreaming peacefully.
1930 */ 1912 */
1931 hdac_hdmi_present_sense_all_pins(hdev, hdmi, false); 1913 hdac_hdmi_present_sense_all_pins(hdev, hdmi, false);
1932 1914 return 0;
1933 pm_runtime_put_sync(&hdev->dev);
1934} 1915}
1935#else 1916#else
1936#define hdmi_codec_prepare NULL 1917#define hdmi_codec_resume NULL
1937#define hdmi_codec_complete NULL
1938#endif 1918#endif
1939 1919
1940static const struct snd_soc_component_driver hdmi_hda_codec = { 1920static const struct snd_soc_component_driver hdmi_hda_codec = {
@@ -2135,75 +2115,6 @@ static int hdac_hdmi_dev_remove(struct hdac_device *hdev)
2135} 2115}
2136 2116
2137#ifdef CONFIG_PM 2117#ifdef CONFIG_PM
2138/*
2139 * Power management sequences
2140 * ==========================
2141 *
2142 * The following explains the PM handling of HDAC HDMI with its parent
2143 * device SKL and display power usage
2144 *
2145 * Probe
2146 * -----
2147 * In SKL probe,
2148 * 1. skl_probe_work() powers up the display (refcount++ -> 1)
2149 * 2. enumerates the codecs on the link
2150 * 3. powers down the display (refcount-- -> 0)
2151 *
2152 * In HDAC HDMI probe,
2153 * 1. hdac_hdmi_dev_probe() powers up the display (refcount++ -> 1)
2154 * 2. probe the codec
2155 * 3. put the HDAC HDMI device to runtime suspend
2156 * 4. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
2157 *
2158 * Once children are runtime suspended, SKL device also goes to runtime
2159 * suspend
2160 *
2161 * HDMI Playback
2162 * -------------
2163 * Open HDMI device,
2164 * 1. skl_runtime_resume() invoked
2165 * 2. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
2166 *
2167 * Close HDMI device,
2168 * 1. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
2169 * 2. skl_runtime_suspend() invoked
2170 *
2171 * S0/S3 Cycle with playback in progress
2172 * -------------------------------------
2173 * When the device is opened for playback, the device is runtime active
2174 * already and the display refcount is 1 as explained above.
2175 *
2176 * Entering to S3,
2177 * 1. hdmi_codec_prepare() invoke the runtime resume of codec which just
2178 * increments the PM runtime usage count of the codec since the device
2179 * is in use already
2180 * 2. skl_suspend() powers down the display (refcount-- -> 0)
2181 *
2182 * Wakeup from S3,
2183 * 1. skl_resume() powers up the display (refcount++ -> 1)
2184 * 2. hdmi_codec_complete() invokes the runtime suspend of codec which just
2185 * decrements the PM runtime usage count of the codec since the device
2186 * is in use already
2187 *
2188 * Once playback is stopped, the display refcount is set to 0 as explained
2189 * above in the HDMI playback sequence. The PM handlings are designed in
2190 * such way that to balance the refcount of display power when the codec
2191 * device put to S3 while playback is going on.
2192 *
2193 * S0/S3 Cycle without playback in progress
2194 * ----------------------------------------
2195 * Entering to S3,
2196 * 1. hdmi_codec_prepare() invoke the runtime resume of codec
2197 * 2. skl_runtime_resume() invoked
2198 * 3. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
2199 * 4. skl_suspend() powers down the display (refcount-- -> 0)
2200 *
2201 * Wakeup from S3,
2202 * 1. skl_resume() powers up the display (refcount++ -> 1)
2203 * 2. hdmi_codec_complete() invokes the runtime suspend of codec
2204 * 3. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
2205 * 4. skl_runtime_suspend() invoked
2206 */
2207static int hdac_hdmi_runtime_suspend(struct device *dev) 2118static int hdac_hdmi_runtime_suspend(struct device *dev)
2208{ 2119{
2209 struct hdac_device *hdev = dev_to_hdac_dev(dev); 2120 struct hdac_device *hdev = dev_to_hdac_dev(dev);
@@ -2277,8 +2188,7 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
2277 2188
2278static const struct dev_pm_ops hdac_hdmi_pm = { 2189static const struct dev_pm_ops hdac_hdmi_pm = {
2279 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) 2190 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
2280 .prepare = hdmi_codec_prepare, 2191 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, hdmi_codec_resume)
2281 .complete = hdmi_codec_complete,
2282}; 2192};
2283 2193
2284static const struct hda_device_id hdmi_list[] = { 2194static const struct hda_device_id hdmi_list[] = {
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index d00734d31e04..e5b6769b9797 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
795 if (hcd->spdif) 795 if (hcd->spdif)
796 hcp->daidrv[i] = hdmi_spdif_dai; 796 hcp->daidrv[i] = hdmi_spdif_dai;
797 797
798 dev_set_drvdata(dev, hcp);
799
798 ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv, 800 ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv,
799 dai_count); 801 dai_count);
800 if (ret) { 802 if (ret) {
@@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
802 __func__, ret); 804 __func__, ret);
803 return ret; 805 return ret;
804 } 806 }
805
806 dev_set_drvdata(dev, hcp);
807 return 0; 807 return 0;
808} 808}
809 809
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 6cb1653be804..4cc24a5d5c31 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1400,24 +1400,20 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
1400 if (ret != 0) { 1400 if (ret != 0) {
1401 dev_err(component->dev, 1401 dev_err(component->dev,
1402 "Failed to set digital mute: %d\n", ret); 1402 "Failed to set digital mute: %d\n", ret);
1403 mutex_unlock(&pcm512x->mutex); 1403 goto unlock;
1404 return ret;
1405 } 1404 }
1406 1405
1407 regmap_read_poll_timeout(pcm512x->regmap, 1406 regmap_read_poll_timeout(pcm512x->regmap,
1408 PCM512x_ANALOG_MUTE_DET, 1407 PCM512x_ANALOG_MUTE_DET,
1409 mute_det, (mute_det & 0x3) == 0, 1408 mute_det, (mute_det & 0x3) == 0,
1410 200, 10000); 1409 200, 10000);
1411
1412 mutex_unlock(&pcm512x->mutex);
1413 } else { 1410 } else {
1414 pcm512x->mute &= ~0x1; 1411 pcm512x->mute &= ~0x1;
1415 ret = pcm512x_update_mute(pcm512x); 1412 ret = pcm512x_update_mute(pcm512x);
1416 if (ret != 0) { 1413 if (ret != 0) {
1417 dev_err(component->dev, 1414 dev_err(component->dev,
1418 "Failed to update digital mute: %d\n", ret); 1415 "Failed to update digital mute: %d\n", ret);
1419 mutex_unlock(&pcm512x->mutex); 1416 goto unlock;
1420 return ret;
1421 } 1417 }
1422 1418
1423 regmap_read_poll_timeout(pcm512x->regmap, 1419 regmap_read_poll_timeout(pcm512x->regmap,
@@ -1428,9 +1424,10 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
1428 200, 10000); 1424 200, 10000);
1429 } 1425 }
1430 1426
1427unlock:
1431 mutex_unlock(&pcm512x->mutex); 1428 mutex_unlock(&pcm512x->mutex);
1432 1429
1433 return 0; 1430 return ret;
1434} 1431}
1435 1432
1436static const struct snd_soc_dai_ops pcm512x_dai_ops = { 1433static const struct snd_soc_dai_ops pcm512x_dai_ops = {
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index 0ef966d56bac..e2855ab9a2c6 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -1128,8 +1128,11 @@ static int rt274_i2c_probe(struct i2c_client *i2c,
1128 return ret; 1128 return ret;
1129 } 1129 }
1130 1130
1131 regmap_read(rt274->regmap, 1131 ret = regmap_read(rt274->regmap,
1132 RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val); 1132 RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
1133 if (ret)
1134 return ret;
1135
1133 if (val != RT274_VENDOR_ID) { 1136 if (val != RT274_VENDOR_ID) {
1134 dev_err(&i2c->dev, 1137 dev_err(&i2c->dev,
1135 "Device with ID register %#x is not rt274\n", val); 1138 "Device with ID register %#x is not rt274\n", val);
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 4d46f4567c3a..bec2eefa8b0f 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
280 280
281 rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp), 281 rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
282 GFP_KERNEL); 282 GFP_KERNEL);
283 if (!rt5514_dsp)
284 return -ENOMEM;
283 285
284 rt5514_dsp->dev = &rt5514_spi->dev; 286 rt5514_dsp->dev = &rt5514_spi->dev;
285 mutex_init(&rt5514_dsp->dma_lock); 287 mutex_init(&rt5514_dsp->dma_lock);
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 34cfaf8f6f34..a9b91bcfcc09 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -1778,7 +1778,9 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
1778 {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc}, 1778 {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
1779 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, 1779 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
1780 {"ADC STO1 ASRC", NULL, "AD ASRC"}, 1780 {"ADC STO1 ASRC", NULL, "AD ASRC"},
1781 {"ADC STO1 ASRC", NULL, "DA ASRC"},
1781 {"ADC STO1 ASRC", NULL, "CLKDET"}, 1782 {"ADC STO1 ASRC", NULL, "CLKDET"},
1783 {"DAC STO1 ASRC", NULL, "AD ASRC"},
1782 {"DAC STO1 ASRC", NULL, "DA ASRC"}, 1784 {"DAC STO1 ASRC", NULL, "DA ASRC"},
1783 {"DAC STO1 ASRC", NULL, "CLKDET"}, 1785 {"DAC STO1 ASRC", NULL, "CLKDET"},
1784 1786
@@ -2512,6 +2514,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
2512 regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000); 2514 regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000);
2513 regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000); 2515 regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000);
2514 regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005); 2516 regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
2517 regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
2515 2518
2516 mutex_unlock(&rt5682->calibrate_mutex); 2519 mutex_unlock(&rt5682->calibrate_mutex);
2517 2520
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index d82a8301fd74..96944cff0ed7 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -849,18 +849,18 @@
849#define RT5682_SCLK_SRC_PLL2 (0x2 << 13) 849#define RT5682_SCLK_SRC_PLL2 (0x2 << 13)
850#define RT5682_SCLK_SRC_SDW (0x3 << 13) 850#define RT5682_SCLK_SRC_SDW (0x3 << 13)
851#define RT5682_SCLK_SRC_RCCLK (0x4 << 13) 851#define RT5682_SCLK_SRC_RCCLK (0x4 << 13)
852#define RT5682_PLL1_SRC_MASK (0x3 << 10) 852#define RT5682_PLL2_SRC_MASK (0x3 << 10)
853#define RT5682_PLL1_SRC_SFT 10 853#define RT5682_PLL2_SRC_SFT 10
854#define RT5682_PLL1_SRC_MCLK (0x0 << 10) 854#define RT5682_PLL2_SRC_MCLK (0x0 << 10)
855#define RT5682_PLL1_SRC_BCLK1 (0x1 << 10) 855#define RT5682_PLL2_SRC_BCLK1 (0x1 << 10)
856#define RT5682_PLL1_SRC_SDW (0x2 << 10) 856#define RT5682_PLL2_SRC_SDW (0x2 << 10)
857#define RT5682_PLL1_SRC_RC (0x3 << 10) 857#define RT5682_PLL2_SRC_RC (0x3 << 10)
858#define RT5682_PLL2_SRC_MASK (0x3 << 8) 858#define RT5682_PLL1_SRC_MASK (0x3 << 8)
859#define RT5682_PLL2_SRC_SFT 8 859#define RT5682_PLL1_SRC_SFT 8
860#define RT5682_PLL2_SRC_MCLK (0x0 << 8) 860#define RT5682_PLL1_SRC_MCLK (0x0 << 8)
861#define RT5682_PLL2_SRC_BCLK1 (0x1 << 8) 861#define RT5682_PLL1_SRC_BCLK1 (0x1 << 8)
862#define RT5682_PLL2_SRC_SDW (0x2 << 8) 862#define RT5682_PLL1_SRC_SDW (0x2 << 8)
863#define RT5682_PLL2_SRC_RC (0x3 << 8) 863#define RT5682_PLL1_SRC_RC (0x3 << 8)
864 864
865 865
866 866
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index e2b5a11b16d1..f03195d2ab2e 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component,
822 case SND_SOC_BIAS_PREPARE: 822 case SND_SOC_BIAS_PREPARE:
823 break; 823 break;
824 case SND_SOC_BIAS_STANDBY: 824 case SND_SOC_BIAS_STANDBY:
825 /* Initial cold start */
826 if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
827 break;
828
825 /* Switch off BCLK_N Divider */ 829 /* Switch off BCLK_N Divider */
826 snd_soc_component_update_bits(component, AIC32X4_BCLKN, 830 snd_soc_component_update_bits(component, AIC32X4_BCLKN,
827 AIC32X4_BCLKEN, 0); 831 AIC32X4_BCLKEN, 0);
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 392d5eef356d..99e07b01a2ce 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
86 if (!buf) 86 if (!buf)
87 return -ENOMEM; 87 return -ENOMEM;
88 88
89 ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", 89 ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
90 pdcr, ptcr); 90 pdcr, ptcr);
91 91
92 if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR) 92 if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR)
93 ret += snprintf(buf + ret, PAGE_SIZE - ret, 93 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
94 "TxFS output from %s, ", 94 "TxFS output from %s, ",
95 audmux_port_string((ptcr >> 27) & 0x7)); 95 audmux_port_string((ptcr >> 27) & 0x7));
96 else 96 else
97 ret += snprintf(buf + ret, PAGE_SIZE - ret, 97 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
98 "TxFS input, "); 98 "TxFS input, ");
99 99
100 if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR) 100 if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR)
101 ret += snprintf(buf + ret, PAGE_SIZE - ret, 101 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
102 "TxClk output from %s", 102 "TxClk output from %s",
103 audmux_port_string((ptcr >> 22) & 0x7)); 103 audmux_port_string((ptcr >> 22) & 0x7));
104 else 104 else
105 ret += snprintf(buf + ret, PAGE_SIZE - ret, 105 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
106 "TxClk input"); 106 "TxClk input");
107 107
108 ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); 108 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
109 109
110 if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) { 110 if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) {
111 ret += snprintf(buf + ret, PAGE_SIZE - ret, 111 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
112 "Port is symmetric"); 112 "Port is symmetric");
113 } else { 113 } else {
114 if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR) 114 if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR)
115 ret += snprintf(buf + ret, PAGE_SIZE - ret, 115 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
116 "RxFS output from %s, ", 116 "RxFS output from %s, ",
117 audmux_port_string((ptcr >> 17) & 0x7)); 117 audmux_port_string((ptcr >> 17) & 0x7));
118 else 118 else
119 ret += snprintf(buf + ret, PAGE_SIZE - ret, 119 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
120 "RxFS input, "); 120 "RxFS input, ");
121 121
122 if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR) 122 if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR)
123 ret += snprintf(buf + ret, PAGE_SIZE - ret, 123 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
124 "RxClk output from %s", 124 "RxClk output from %s",
125 audmux_port_string((ptcr >> 12) & 0x7)); 125 audmux_port_string((ptcr >> 12) & 0x7));
126 else 126 else
127 ret += snprintf(buf + ret, PAGE_SIZE - ret, 127 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
128 "RxClk input"); 128 "RxClk input");
129 } 129 }
130 130
131 ret += snprintf(buf + ret, PAGE_SIZE - ret, 131 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
132 "\nData received from %s\n", 132 "\nData received from %s\n",
133 audmux_port_string((pdcr >> 13) & 0x7)); 133 audmux_port_string((pdcr >> 13) & 0x7));
134 134
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 37e001cf9cd1..3fe34417ec89 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -462,7 +462,7 @@ static int asoc_simple_card_parse_of(struct simple_card_data *priv)
462 conf_idx = 0; 462 conf_idx = 0;
463 node = of_get_child_by_name(top, PREFIX "dai-link"); 463 node = of_get_child_by_name(top, PREFIX "dai-link");
464 if (!node) { 464 if (!node) {
465 node = dev->of_node; 465 node = of_node_get(top);
466 loop = 0; 466 loop = 0;
467 } 467 }
468 468
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 99a62ba409df..bd9fd2035c55 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -91,7 +91,7 @@ config SND_SST_ATOM_HIFI2_PLATFORM_PCI
91config SND_SST_ATOM_HIFI2_PLATFORM_ACPI 91config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
92 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" 92 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
93 default ACPI 93 default ACPI
94 depends on X86 && ACPI 94 depends on X86 && ACPI && PCI
95 select SND_SST_IPC_ACPI 95 select SND_SST_IPC_ACPI
96 select SND_SST_ATOM_HIFI2_PLATFORM 96 select SND_SST_ATOM_HIFI2_PLATFORM
97 select SND_SOC_ACPI_INTEL_MATCH 97 select SND_SOC_ACPI_INTEL_MATCH
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index afc559866095..91a2436ce952 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
399 struct snd_pcm_hw_params *params, 399 struct snd_pcm_hw_params *params,
400 struct snd_soc_dai *dai) 400 struct snd_soc_dai *dai)
401{ 401{
402 snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); 402 int ret;
403
404 ret =
405 snd_pcm_lib_malloc_pages(substream,
406 params_buffer_bytes(params));
407 if (ret)
408 return ret;
403 memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); 409 memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
404 return 0; 410 return 0;
405} 411}
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 68e6543e6cb0..99f2a0156ae8 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -192,7 +192,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
192 .stream_name = "Loopback", 192 .stream_name = "Loopback",
193 .cpu_dai_name = "Loopback Pin", 193 .cpu_dai_name = "Loopback Pin",
194 .platform_name = "haswell-pcm-audio", 194 .platform_name = "haswell-pcm-audio",
195 .dynamic = 0, 195 .dynamic = 1,
196 .codec_name = "snd-soc-dummy", 196 .codec_name = "snd-soc-dummy",
197 .codec_dai_name = "snd-soc-dummy-dai", 197 .codec_dai_name = "snd-soc-dummy-dai",
198 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 198 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index c74c4f17316f..8f83b182c4f9 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -55,39 +55,6 @@ enum {
55 GLK_DPCM_AUDIO_HDMI3_PB, 55 GLK_DPCM_AUDIO_HDMI3_PB,
56}; 56};
57 57
58static int platform_clock_control(struct snd_soc_dapm_widget *w,
59 struct snd_kcontrol *k, int event)
60{
61 struct snd_soc_dapm_context *dapm = w->dapm;
62 struct snd_soc_card *card = dapm->card;
63 struct snd_soc_dai *codec_dai;
64 int ret = 0;
65
66 codec_dai = snd_soc_card_get_codec_dai(card, GLK_REALTEK_CODEC_DAI);
67 if (!codec_dai) {
68 dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
69 return -EIO;
70 }
71
72 if (SND_SOC_DAPM_EVENT_OFF(event)) {
73 ret = snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0);
74 if (ret)
75 dev_err(card->dev, "failed to stop sysclk: %d\n", ret);
76 } else if (SND_SOC_DAPM_EVENT_ON(event)) {
77 ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK,
78 GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ);
79 if (ret < 0) {
80 dev_err(card->dev, "can't set codec pll: %d\n", ret);
81 return ret;
82 }
83 }
84
85 if (ret)
86 dev_err(card->dev, "failed to start internal clk: %d\n", ret);
87
88 return ret;
89}
90
91static const struct snd_kcontrol_new geminilake_controls[] = { 58static const struct snd_kcontrol_new geminilake_controls[] = {
92 SOC_DAPM_PIN_SWITCH("Headphone Jack"), 59 SOC_DAPM_PIN_SWITCH("Headphone Jack"),
93 SOC_DAPM_PIN_SWITCH("Headset Mic"), 60 SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -102,14 +69,10 @@ static const struct snd_soc_dapm_widget geminilake_widgets[] = {
102 SND_SOC_DAPM_SPK("HDMI1", NULL), 69 SND_SOC_DAPM_SPK("HDMI1", NULL),
103 SND_SOC_DAPM_SPK("HDMI2", NULL), 70 SND_SOC_DAPM_SPK("HDMI2", NULL),
104 SND_SOC_DAPM_SPK("HDMI3", NULL), 71 SND_SOC_DAPM_SPK("HDMI3", NULL),
105 SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
106 platform_clock_control, SND_SOC_DAPM_PRE_PMU |
107 SND_SOC_DAPM_POST_PMD),
108}; 72};
109 73
110static const struct snd_soc_dapm_route geminilake_map[] = { 74static const struct snd_soc_dapm_route geminilake_map[] = {
111 /* HP jack connectors - unknown if we have jack detection */ 75 /* HP jack connectors - unknown if we have jack detection */
112 { "Headphone Jack", NULL, "Platform Clock" },
113 { "Headphone Jack", NULL, "HPOL" }, 76 { "Headphone Jack", NULL, "HPOL" },
114 { "Headphone Jack", NULL, "HPOR" }, 77 { "Headphone Jack", NULL, "HPOR" },
115 78
@@ -117,7 +80,6 @@ static const struct snd_soc_dapm_route geminilake_map[] = {
117 { "Spk", NULL, "Speaker" }, 80 { "Spk", NULL, "Speaker" },
118 81
119 /* other jacks */ 82 /* other jacks */
120 { "Headset Mic", NULL, "Platform Clock" },
121 { "IN1P", NULL, "Headset Mic" }, 83 { "IN1P", NULL, "Headset Mic" },
122 84
123 /* digital mics */ 85 /* digital mics */
@@ -177,6 +139,13 @@ static int geminilake_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
177 struct snd_soc_jack *jack; 139 struct snd_soc_jack *jack;
178 int ret; 140 int ret;
179 141
142 ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK,
143 GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ);
144 if (ret < 0) {
145 dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
146 return ret;
147 }
148
180 /* Configure sysclk for codec */ 149 /* Configure sysclk for codec */
181 ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1, 150 ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1,
182 RT5682_PLL_FREQ, SND_SOC_CLOCK_IN); 151 RT5682_PLL_FREQ, SND_SOC_CLOCK_IN);
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index eab1f439dd3f..a4022983a7ce 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -146,7 +146,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
146 .stream_name = "Loopback", 146 .stream_name = "Loopback",
147 .cpu_dai_name = "Loopback Pin", 147 .cpu_dai_name = "Loopback Pin",
148 .platform_name = "haswell-pcm-audio", 148 .platform_name = "haswell-pcm-audio",
149 .dynamic = 0, 149 .dynamic = 1,
150 .codec_name = "snd-soc-dummy", 150 .codec_name = "snd-soc-dummy",
151 .codec_dai_name = "snd-soc-dummy-dai", 151 .codec_dai_name = "snd-soc-dummy-dai",
152 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 152 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 60c94836bf5b..4ed5b7e17d44 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -336,9 +336,6 @@ static int skl_suspend(struct device *dev)
336 skl->skl_sst->fw_loaded = false; 336 skl->skl_sst->fw_loaded = false;
337 } 337 }
338 338
339 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
340 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
341
342 return 0; 339 return 0;
343} 340}
344 341
@@ -350,10 +347,6 @@ static int skl_resume(struct device *dev)
350 struct hdac_ext_link *hlink = NULL; 347 struct hdac_ext_link *hlink = NULL;
351 int ret; 348 int ret;
352 349
353 /* Turned OFF in HDMI codec driver after codec reconfiguration */
354 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
355 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
356
357 /* 350 /*
358 * resume only when we are not in suspend active, otherwise need to 351 * resume only when we are not in suspend active, otherwise need to
359 * restore the device 352 * restore the device
@@ -446,8 +439,10 @@ static int skl_free(struct hdac_bus *bus)
446 snd_hdac_ext_bus_exit(bus); 439 snd_hdac_ext_bus_exit(bus);
447 440
448 cancel_work_sync(&skl->probe_work); 441 cancel_work_sync(&skl->probe_work);
449 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 442 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
443 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
450 snd_hdac_i915_exit(bus); 444 snd_hdac_i915_exit(bus);
445 }
451 446
452 return 0; 447 return 0;
453} 448}
@@ -814,7 +809,7 @@ static void skl_probe_work(struct work_struct *work)
814 err = skl_platform_register(bus->dev); 809 err = skl_platform_register(bus->dev);
815 if (err < 0) { 810 if (err < 0) {
816 dev_err(bus->dev, "platform register failed: %d\n", err); 811 dev_err(bus->dev, "platform register failed: %d\n", err);
817 return; 812 goto out_err;
818 } 813 }
819 814
820 err = skl_machine_device_register(skl); 815 err = skl_machine_device_register(skl);
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 5b986b74dd36..548eb4fa2da6 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -570,10 +570,10 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
570 prtd->audio_client = q6asm_audio_client_alloc(dev, 570 prtd->audio_client = q6asm_audio_client_alloc(dev,
571 (q6asm_cb)compress_event_handler, 571 (q6asm_cb)compress_event_handler,
572 prtd, stream_id, LEGACY_PCM_MODE); 572 prtd, stream_id, LEGACY_PCM_MODE);
573 if (!prtd->audio_client) { 573 if (IS_ERR(prtd->audio_client)) {
574 dev_err(dev, "Could not allocate memory\n"); 574 dev_err(dev, "Could not allocate memory\n");
575 kfree(prtd); 575 ret = PTR_ERR(prtd->audio_client);
576 return -ENOMEM; 576 goto free_prtd;
577 } 577 }
578 578
579 size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE * 579 size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE *
@@ -582,7 +582,7 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
582 &prtd->dma_buffer); 582 &prtd->dma_buffer);
583 if (ret) { 583 if (ret) {
584 dev_err(dev, "Cannot allocate buffer(s)\n"); 584 dev_err(dev, "Cannot allocate buffer(s)\n");
585 return ret; 585 goto free_client;
586 } 586 }
587 587
588 if (pdata->sid < 0) 588 if (pdata->sid < 0)
@@ -595,6 +595,13 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
595 runtime->private_data = prtd; 595 runtime->private_data = prtd;
596 596
597 return 0; 597 return 0;
598
599free_client:
600 q6asm_audio_client_free(prtd->audio_client);
601free_prtd:
602 kfree(prtd);
603
604 return ret;
598} 605}
599 606
600static int q6asm_dai_compr_free(struct snd_compr_stream *stream) 607static int q6asm_dai_compr_free(struct snd_compr_stream *stream)
@@ -874,7 +881,7 @@ static int of_q6asm_parse_dai_data(struct device *dev,
874 881
875 for_each_child_of_node(dev->of_node, node) { 882 for_each_child_of_node(dev->of_node, node) {
876 ret = of_property_read_u32(node, "reg", &id); 883 ret = of_property_read_u32(node, "reg", &id);
877 if (ret || id > MAX_SESSIONS || id < 0) { 884 if (ret || id >= MAX_SESSIONS || id < 0) {
878 dev_err(dev, "valid dai id not found:%d\n", ret); 885 dev_err(dev, "valid dai id not found:%d\n", ret);
879 continue; 886 continue;
880 } 887 }
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 1db8ef668223..6f66a58e23ca 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -158,17 +158,24 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
158 return ret; 158 return ret;
159} 159}
160 160
161static void sdm845_jack_free(struct snd_jack *jack)
162{
163 struct snd_soc_component *component = jack->private_data;
164
165 snd_soc_component_set_jack(component, NULL, NULL);
166}
167
161static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd) 168static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
162{ 169{
163 struct snd_soc_component *component; 170 struct snd_soc_component *component;
164 struct snd_soc_dai_link *dai_link = rtd->dai_link;
165 struct snd_soc_card *card = rtd->card; 171 struct snd_soc_card *card = rtd->card;
172 struct snd_soc_dai *codec_dai = rtd->codec_dai;
173 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
166 struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card); 174 struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card);
167 int i, rval; 175 struct snd_jack *jack;
176 int rval;
168 177
169 if (!pdata->jack_setup) { 178 if (!pdata->jack_setup) {
170 struct snd_jack *jack;
171
172 rval = snd_soc_card_jack_new(card, "Headset Jack", 179 rval = snd_soc_card_jack_new(card, "Headset Jack",
173 SND_JACK_HEADSET | 180 SND_JACK_HEADSET |
174 SND_JACK_HEADPHONE | 181 SND_JACK_HEADPHONE |
@@ -190,16 +197,22 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
190 pdata->jack_setup = true; 197 pdata->jack_setup = true;
191 } 198 }
192 199
193 for (i = 0 ; i < dai_link->num_codecs; i++) { 200 switch (cpu_dai->id) {
194 struct snd_soc_dai *dai = rtd->codec_dais[i]; 201 case PRIMARY_MI2S_RX:
202 jack = pdata->jack.jack;
203 component = codec_dai->component;
195 204
196 component = dai->component; 205 jack->private_data = component;
197 rval = snd_soc_component_set_jack( 206 jack->private_free = sdm845_jack_free;
198 component, &pdata->jack, NULL); 207 rval = snd_soc_component_set_jack(component,
208 &pdata->jack, NULL);
199 if (rval != 0 && rval != -ENOTSUPP) { 209 if (rval != 0 && rval != -ENOTSUPP) {
200 dev_warn(card->dev, "Failed to set jack: %d\n", rval); 210 dev_warn(card->dev, "Failed to set jack: %d\n", rval);
201 return rval; 211 return rval;
202 } 212 }
213 break;
214 default:
215 break;
203 } 216 }
204 217
205 return 0; 218 return 0;
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index d6c62aa13041..d4bde4834ce5 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -604,6 +604,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
604 unsigned int fmt) 604 unsigned int fmt)
605{ 605{
606 struct i2s_dai *i2s = to_info(dai); 606 struct i2s_dai *i2s = to_info(dai);
607 struct i2s_dai *other = get_other_dai(i2s);
607 int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave; 608 int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
608 u32 mod, tmp = 0; 609 u32 mod, tmp = 0;
609 unsigned long flags; 610 unsigned long flags;
@@ -661,7 +662,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
661 * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any 662 * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
662 * clock configuration assigned in DT is not overwritten. 663 * clock configuration assigned in DT is not overwritten.
663 */ 664 */
664 if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL) 665 if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL &&
666 other->clk_data.clks == NULL)
665 i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, 667 i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
666 0, SND_SOC_CLOCK_IN); 668 0, SND_SOC_CLOCK_IN);
667 break; 669 break;
@@ -699,7 +701,9 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
699 struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) 701 struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
700{ 702{
701 struct i2s_dai *i2s = to_info(dai); 703 struct i2s_dai *i2s = to_info(dai);
704 struct i2s_dai *other = get_other_dai(i2s);
702 u32 mod, mask = 0, val = 0; 705 u32 mod, mask = 0, val = 0;
706 struct clk *rclksrc;
703 unsigned long flags; 707 unsigned long flags;
704 708
705 WARN_ON(!pm_runtime_active(dai->dev)); 709 WARN_ON(!pm_runtime_active(dai->dev));
@@ -782,6 +786,13 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
782 786
783 i2s->frmclk = params_rate(params); 787 i2s->frmclk = params_rate(params);
784 788
789 rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
790 if (!rclksrc || IS_ERR(rclksrc))
791 rclksrc = other->clk_table[CLK_I2S_RCLK_SRC];
792
793 if (rclksrc && !IS_ERR(rclksrc))
794 i2s->rclk_srcrate = clk_get_rate(rclksrc);
795
785 return 0; 796 return 0;
786} 797}
787 798
@@ -886,11 +897,6 @@ static int config_setup(struct i2s_dai *i2s)
886 return 0; 897 return 0;
887 898
888 if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { 899 if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
889 struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
890
891 if (rclksrc && !IS_ERR(rclksrc))
892 i2s->rclk_srcrate = clk_get_rate(rclksrc);
893
894 psr = i2s->rclk_srcrate / i2s->frmclk / rfs; 900 psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
895 writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); 901 writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
896 dev_dbg(&i2s->pdev->dev, 902 dev_dbg(&i2s->pdev->dev,
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 922fb6aa3ed1..5aee11c94f2a 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -202,7 +202,7 @@ static int camelot_prepare(struct snd_pcm_substream *substream)
202 struct snd_soc_pcm_runtime *rtd = substream->private_data; 202 struct snd_soc_pcm_runtime *rtd = substream->private_data;
203 struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id]; 203 struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
204 204
205 pr_debug("PCM data: addr 0x%08ulx len %d\n", 205 pr_debug("PCM data: addr 0x%08lx len %d\n",
206 (u32)runtime->dma_addr, runtime->dma_bytes); 206 (u32)runtime->dma_addr, runtime->dma_bytes);
207 207
208 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 208 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 59e250cc2e9d..e819e965e1db 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1526,14 +1526,14 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
1526 int ret; 1526 int ret;
1527 1527
1528 /* 1528 /*
1529 * 1) Avoid duplicate register (ex. MIXer case) 1529 * 1) Avoid duplicate register for DVC with MIX case
1530 * 2) re-register if card was rebinded 1530 * 2) Allow duplicate register for MIX
1531 * 3) re-register if card was rebinded
1531 */ 1532 */
1532 list_for_each_entry(kctrl, &card->controls, list) { 1533 list_for_each_entry(kctrl, &card->controls, list) {
1533 struct rsnd_kctrl_cfg *c = kctrl->private_data; 1534 struct rsnd_kctrl_cfg *c = kctrl->private_data;
1534 1535
1535 if (strcmp(kctrl->id.name, name) == 0 && 1536 if (c == cfg)
1536 c->mod == mod)
1537 return 0; 1537 return 0;
1538 } 1538 }
1539 1539
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 45ef295743ec..f5afab631abb 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -286,7 +286,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
286 if (rsnd_ssi_is_multi_slave(mod, io)) 286 if (rsnd_ssi_is_multi_slave(mod, io))
287 return 0; 287 return 0;
288 288
289 if (ssi->usrcnt > 1) { 289 if (ssi->usrcnt > 0) {
290 if (ssi->rate != rate) { 290 if (ssi->rate != rate) {
291 dev_err(dev, "SSI parent/child should use same rate\n"); 291 dev_err(dev, "SSI parent/child should use same rate\n");
292 return -EINVAL; 292 return -EINVAL;
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index c5934adcfd01..c74991dd18ab 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -79,7 +79,7 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
79 break; 79 break;
80 case 9: 80 case 9:
81 for (i = 0; i < 4; i++) 81 for (i = 0; i < 4; i++)
82 rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << (id * 4)); 82 rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << 4);
83 break; 83 break;
84 } 84 }
85 85
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0462b3ec977a..50617db05c46 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -735,14 +735,19 @@ static struct snd_soc_component *soc_find_component(
735 const struct device_node *of_node, const char *name) 735 const struct device_node *of_node, const char *name)
736{ 736{
737 struct snd_soc_component *component; 737 struct snd_soc_component *component;
738 struct device_node *component_of_node;
738 739
739 lockdep_assert_held(&client_mutex); 740 lockdep_assert_held(&client_mutex);
740 741
741 for_each_component(component) { 742 for_each_component(component) {
742 if (of_node) { 743 if (of_node) {
743 if (component->dev->of_node == of_node) 744 component_of_node = component->dev->of_node;
745 if (!component_of_node && component->dev->parent)
746 component_of_node = component->dev->parent->of_node;
747
748 if (component_of_node == of_node)
744 return component; 749 return component;
745 } else if (strcmp(component->name, name) == 0) { 750 } else if (name && strcmp(component->name, name) == 0) {
746 return component; 751 return component;
747 } 752 }
748 } 753 }
@@ -951,7 +956,7 @@ static void soc_remove_dai(struct snd_soc_dai *dai, int order)
951{ 956{
952 int err; 957 int err;
953 958
954 if (!dai || !dai->probed || 959 if (!dai || !dai->probed || !dai->driver ||
955 dai->driver->remove_order != order) 960 dai->driver->remove_order != order)
956 return; 961 return;
957 962
@@ -1034,17 +1039,18 @@ static int snd_soc_init_platform(struct snd_soc_card *card,
1034 * this function should be removed in the future 1039 * this function should be removed in the future
1035 */ 1040 */
1036 /* convert Legacy platform link */ 1041 /* convert Legacy platform link */
1037 if (!platform) { 1042 if (!platform || dai_link->legacy_platform) {
1038 platform = devm_kzalloc(card->dev, 1043 platform = devm_kzalloc(card->dev,
1039 sizeof(struct snd_soc_dai_link_component), 1044 sizeof(struct snd_soc_dai_link_component),
1040 GFP_KERNEL); 1045 GFP_KERNEL);
1041 if (!platform) 1046 if (!platform)
1042 return -ENOMEM; 1047 return -ENOMEM;
1043 1048
1044 dai_link->platform = platform; 1049 dai_link->platform = platform;
1045 platform->name = dai_link->platform_name; 1050 dai_link->legacy_platform = 1;
1046 platform->of_node = dai_link->platform_of_node; 1051 platform->name = dai_link->platform_name;
1047 platform->dai_name = NULL; 1052 platform->of_node = dai_link->platform_of_node;
1053 platform->dai_name = NULL;
1048 } 1054 }
1049 1055
1050 /* if there's no platform we match on the empty platform */ 1056 /* if there's no platform we match on the empty platform */
@@ -1129,6 +1135,15 @@ static int soc_init_dai_link(struct snd_soc_card *card,
1129 link->name); 1135 link->name);
1130 return -EINVAL; 1136 return -EINVAL;
1131 } 1137 }
1138
1139 /*
1140 * Defer card registartion if platform dai component is not added to
1141 * component list.
1142 */
1143 if ((link->platform->of_node || link->platform->name) &&
1144 !soc_find_component(link->platform->of_node, link->platform->name))
1145 return -EPROBE_DEFER;
1146
1132 /* 1147 /*
1133 * CPU device may be specified by either name or OF node, but 1148 * CPU device may be specified by either name or OF node, but
1134 * can be left unspecified, and will be matched based on DAI 1149 * can be left unspecified, and will be matched based on DAI
@@ -1140,6 +1155,15 @@ static int soc_init_dai_link(struct snd_soc_card *card,
1140 link->name); 1155 link->name);
1141 return -EINVAL; 1156 return -EINVAL;
1142 } 1157 }
1158
1159 /*
1160 * Defer card registartion if cpu dai component is not added to
1161 * component list.
1162 */
1163 if ((link->cpu_of_node || link->cpu_name) &&
1164 !soc_find_component(link->cpu_of_node, link->cpu_name))
1165 return -EPROBE_DEFER;
1166
1143 /* 1167 /*
1144 * At least one of CPU DAI name or CPU device name/node must be 1168 * At least one of CPU DAI name or CPU device name/node must be
1145 * specified 1169 * specified
@@ -2739,15 +2763,18 @@ int snd_soc_register_card(struct snd_soc_card *card)
2739 if (!card->name || !card->dev) 2763 if (!card->name || !card->dev)
2740 return -EINVAL; 2764 return -EINVAL;
2741 2765
2766 mutex_lock(&client_mutex);
2742 for_each_card_prelinks(card, i, link) { 2767 for_each_card_prelinks(card, i, link) {
2743 2768
2744 ret = soc_init_dai_link(card, link); 2769 ret = soc_init_dai_link(card, link);
2745 if (ret) { 2770 if (ret) {
2746 dev_err(card->dev, "ASoC: failed to init link %s\n", 2771 dev_err(card->dev, "ASoC: failed to init link %s\n",
2747 link->name); 2772 link->name);
2773 mutex_unlock(&client_mutex);
2748 return ret; 2774 return ret;
2749 } 2775 }
2750 } 2776 }
2777 mutex_unlock(&client_mutex);
2751 2778
2752 dev_set_drvdata(card->dev, card); 2779 dev_set_drvdata(card->dev, card);
2753 2780
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index a5178845065b..20bad755888b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -70,12 +70,16 @@ static int dapm_up_seq[] = {
70 [snd_soc_dapm_clock_supply] = 1, 70 [snd_soc_dapm_clock_supply] = 1,
71 [snd_soc_dapm_supply] = 2, 71 [snd_soc_dapm_supply] = 2,
72 [snd_soc_dapm_micbias] = 3, 72 [snd_soc_dapm_micbias] = 3,
73 [snd_soc_dapm_vmid] = 3,
73 [snd_soc_dapm_dai_link] = 2, 74 [snd_soc_dapm_dai_link] = 2,
74 [snd_soc_dapm_dai_in] = 4, 75 [snd_soc_dapm_dai_in] = 4,
75 [snd_soc_dapm_dai_out] = 4, 76 [snd_soc_dapm_dai_out] = 4,
76 [snd_soc_dapm_aif_in] = 4, 77 [snd_soc_dapm_aif_in] = 4,
77 [snd_soc_dapm_aif_out] = 4, 78 [snd_soc_dapm_aif_out] = 4,
78 [snd_soc_dapm_mic] = 5, 79 [snd_soc_dapm_mic] = 5,
80 [snd_soc_dapm_siggen] = 5,
81 [snd_soc_dapm_input] = 5,
82 [snd_soc_dapm_output] = 5,
79 [snd_soc_dapm_mux] = 6, 83 [snd_soc_dapm_mux] = 6,
80 [snd_soc_dapm_demux] = 6, 84 [snd_soc_dapm_demux] = 6,
81 [snd_soc_dapm_dac] = 7, 85 [snd_soc_dapm_dac] = 7,
@@ -83,11 +87,19 @@ static int dapm_up_seq[] = {
83 [snd_soc_dapm_mixer] = 8, 87 [snd_soc_dapm_mixer] = 8,
84 [snd_soc_dapm_mixer_named_ctl] = 8, 88 [snd_soc_dapm_mixer_named_ctl] = 8,
85 [snd_soc_dapm_pga] = 9, 89 [snd_soc_dapm_pga] = 9,
90 [snd_soc_dapm_buffer] = 9,
91 [snd_soc_dapm_scheduler] = 9,
92 [snd_soc_dapm_effect] = 9,
93 [snd_soc_dapm_src] = 9,
94 [snd_soc_dapm_asrc] = 9,
95 [snd_soc_dapm_encoder] = 9,
96 [snd_soc_dapm_decoder] = 9,
86 [snd_soc_dapm_adc] = 10, 97 [snd_soc_dapm_adc] = 10,
87 [snd_soc_dapm_out_drv] = 11, 98 [snd_soc_dapm_out_drv] = 11,
88 [snd_soc_dapm_hp] = 11, 99 [snd_soc_dapm_hp] = 11,
89 [snd_soc_dapm_spk] = 11, 100 [snd_soc_dapm_spk] = 11,
90 [snd_soc_dapm_line] = 11, 101 [snd_soc_dapm_line] = 11,
102 [snd_soc_dapm_sink] = 11,
91 [snd_soc_dapm_kcontrol] = 12, 103 [snd_soc_dapm_kcontrol] = 12,
92 [snd_soc_dapm_post] = 13, 104 [snd_soc_dapm_post] = 13,
93}; 105};
@@ -100,13 +112,25 @@ static int dapm_down_seq[] = {
100 [snd_soc_dapm_spk] = 3, 112 [snd_soc_dapm_spk] = 3,
101 [snd_soc_dapm_line] = 3, 113 [snd_soc_dapm_line] = 3,
102 [snd_soc_dapm_out_drv] = 3, 114 [snd_soc_dapm_out_drv] = 3,
115 [snd_soc_dapm_sink] = 3,
103 [snd_soc_dapm_pga] = 4, 116 [snd_soc_dapm_pga] = 4,
117 [snd_soc_dapm_buffer] = 4,
118 [snd_soc_dapm_scheduler] = 4,
119 [snd_soc_dapm_effect] = 4,
120 [snd_soc_dapm_src] = 4,
121 [snd_soc_dapm_asrc] = 4,
122 [snd_soc_dapm_encoder] = 4,
123 [snd_soc_dapm_decoder] = 4,
104 [snd_soc_dapm_switch] = 5, 124 [snd_soc_dapm_switch] = 5,
105 [snd_soc_dapm_mixer_named_ctl] = 5, 125 [snd_soc_dapm_mixer_named_ctl] = 5,
106 [snd_soc_dapm_mixer] = 5, 126 [snd_soc_dapm_mixer] = 5,
107 [snd_soc_dapm_dac] = 6, 127 [snd_soc_dapm_dac] = 6,
108 [snd_soc_dapm_mic] = 7, 128 [snd_soc_dapm_mic] = 7,
129 [snd_soc_dapm_siggen] = 7,
130 [snd_soc_dapm_input] = 7,
131 [snd_soc_dapm_output] = 7,
109 [snd_soc_dapm_micbias] = 8, 132 [snd_soc_dapm_micbias] = 8,
133 [snd_soc_dapm_vmid] = 8,
110 [snd_soc_dapm_mux] = 9, 134 [snd_soc_dapm_mux] = 9,
111 [snd_soc_dapm_demux] = 9, 135 [snd_soc_dapm_demux] = 9,
112 [snd_soc_dapm_aif_in] = 10, 136 [snd_soc_dapm_aif_in] = 10,
@@ -2019,19 +2043,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
2019 out = is_connected_output_ep(w, NULL, NULL); 2043 out = is_connected_output_ep(w, NULL, NULL);
2020 } 2044 }
2021 2045
2022 ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", 2046 ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
2023 w->name, w->power ? "On" : "Off", 2047 w->name, w->power ? "On" : "Off",
2024 w->force ? " (forced)" : "", in, out); 2048 w->force ? " (forced)" : "", in, out);
2025 2049
2026 if (w->reg >= 0) 2050 if (w->reg >= 0)
2027 ret += snprintf(buf + ret, PAGE_SIZE - ret, 2051 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2028 " - R%d(0x%x) mask 0x%x", 2052 " - R%d(0x%x) mask 0x%x",
2029 w->reg, w->reg, w->mask << w->shift); 2053 w->reg, w->reg, w->mask << w->shift);
2030 2054
2031 ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); 2055 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
2032 2056
2033 if (w->sname) 2057 if (w->sname)
2034 ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", 2058 ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
2035 w->sname, 2059 w->sname,
2036 w->active ? "active" : "inactive"); 2060 w->active ? "active" : "inactive");
2037 2061
@@ -2044,7 +2068,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
2044 if (!p->connect) 2068 if (!p->connect)
2045 continue; 2069 continue;
2046 2070
2047 ret += snprintf(buf + ret, PAGE_SIZE - ret, 2071 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2048 " %s \"%s\" \"%s\"\n", 2072 " %s \"%s\" \"%s\"\n",
2049 (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out", 2073 (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
2050 p->name ? p->name : "static", 2074 p->name ? p->name : "static",
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 045ef136903d..731b963b6995 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -502,6 +502,7 @@ static void remove_dai(struct snd_soc_component *comp,
502{ 502{
503 struct snd_soc_dai_driver *dai_drv = 503 struct snd_soc_dai_driver *dai_drv =
504 container_of(dobj, struct snd_soc_dai_driver, dobj); 504 container_of(dobj, struct snd_soc_dai_driver, dobj);
505 struct snd_soc_dai *dai;
505 506
506 if (pass != SOC_TPLG_PASS_PCM_DAI) 507 if (pass != SOC_TPLG_PASS_PCM_DAI)
507 return; 508 return;
@@ -509,6 +510,10 @@ static void remove_dai(struct snd_soc_component *comp,
509 if (dobj->ops && dobj->ops->dai_unload) 510 if (dobj->ops && dobj->ops->dai_unload)
510 dobj->ops->dai_unload(comp, dobj); 511 dobj->ops->dai_unload(comp, dobj);
511 512
513 list_for_each_entry(dai, &comp->dai_list, list)
514 if (dai->driver == dai_drv)
515 dai->driver = NULL;
516
512 kfree(dai_drv->name); 517 kfree(dai_drv->name);
513 list_del(&dobj->list); 518 list_del(&dobj->list);
514 kfree(dai_drv); 519 kfree(dai_drv);
@@ -2482,6 +2487,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
2482 struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id) 2487 struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id)
2483{ 2488{
2484 struct soc_tplg tplg; 2489 struct soc_tplg tplg;
2490 int ret;
2485 2491
2486 /* setup parsing context */ 2492 /* setup parsing context */
2487 memset(&tplg, 0, sizeof(tplg)); 2493 memset(&tplg, 0, sizeof(tplg));
@@ -2495,7 +2501,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
2495 tplg.bytes_ext_ops = ops->bytes_ext_ops; 2501 tplg.bytes_ext_ops = ops->bytes_ext_ops;
2496 tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count; 2502 tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
2497 2503
2498 return soc_tplg_load(&tplg); 2504 ret = soc_tplg_load(&tplg);
2505 /* free the created components if fail to load topology */
2506 if (ret)
2507 snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL);
2508
2509 return ret;
2499} 2510}
2500EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); 2511EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
2501 2512
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index eeda6d5565bc..a10fcb5963c6 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -108,7 +108,7 @@ struct davinci_mcasp {
108 /* Used for comstraint setting on the second stream */ 108 /* Used for comstraint setting on the second stream */
109 u32 channels; 109 u32 channels;
110 110
111#ifdef CONFIG_PM_SLEEP 111#ifdef CONFIG_PM
112 struct davinci_mcasp_context context; 112 struct davinci_mcasp_context context;
113#endif 113#endif
114 114
@@ -1486,74 +1486,6 @@ static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
1486 return 0; 1486 return 0;
1487} 1487}
1488 1488
1489#ifdef CONFIG_PM_SLEEP
1490static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
1491{
1492 struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
1493 struct davinci_mcasp_context *context = &mcasp->context;
1494 u32 reg;
1495 int i;
1496
1497 context->pm_state = pm_runtime_active(mcasp->dev);
1498 if (!context->pm_state)
1499 pm_runtime_get_sync(mcasp->dev);
1500
1501 for (i = 0; i < ARRAY_SIZE(context_regs); i++)
1502 context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
1503
1504 if (mcasp->txnumevt) {
1505 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
1506 context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
1507 }
1508 if (mcasp->rxnumevt) {
1509 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
1510 context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
1511 }
1512
1513 for (i = 0; i < mcasp->num_serializer; i++)
1514 context->xrsr_regs[i] = mcasp_get_reg(mcasp,
1515 DAVINCI_MCASP_XRSRCTL_REG(i));
1516
1517 pm_runtime_put_sync(mcasp->dev);
1518
1519 return 0;
1520}
1521
1522static int davinci_mcasp_resume(struct snd_soc_dai *dai)
1523{
1524 struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
1525 struct davinci_mcasp_context *context = &mcasp->context;
1526 u32 reg;
1527 int i;
1528
1529 pm_runtime_get_sync(mcasp->dev);
1530
1531 for (i = 0; i < ARRAY_SIZE(context_regs); i++)
1532 mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
1533
1534 if (mcasp->txnumevt) {
1535 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
1536 mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
1537 }
1538 if (mcasp->rxnumevt) {
1539 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
1540 mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
1541 }
1542
1543 for (i = 0; i < mcasp->num_serializer; i++)
1544 mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
1545 context->xrsr_regs[i]);
1546
1547 if (!context->pm_state)
1548 pm_runtime_put_sync(mcasp->dev);
1549
1550 return 0;
1551}
1552#else
1553#define davinci_mcasp_suspend NULL
1554#define davinci_mcasp_resume NULL
1555#endif
1556
1557#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000 1489#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000
1558 1490
1559#define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \ 1491#define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \
@@ -1571,8 +1503,6 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
1571 { 1503 {
1572 .name = "davinci-mcasp.0", 1504 .name = "davinci-mcasp.0",
1573 .probe = davinci_mcasp_dai_probe, 1505 .probe = davinci_mcasp_dai_probe,
1574 .suspend = davinci_mcasp_suspend,
1575 .resume = davinci_mcasp_resume,
1576 .playback = { 1506 .playback = {
1577 .channels_min = 1, 1507 .channels_min = 1,
1578 .channels_max = 32 * 16, 1508 .channels_max = 32 * 16,
@@ -1976,7 +1906,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1976 } 1906 }
1977 1907
1978 mcasp->num_serializer = pdata->num_serializer; 1908 mcasp->num_serializer = pdata->num_serializer;
1979#ifdef CONFIG_PM_SLEEP 1909#ifdef CONFIG_PM
1980 mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev, 1910 mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev,
1981 mcasp->num_serializer, sizeof(u32), 1911 mcasp->num_serializer, sizeof(u32),
1982 GFP_KERNEL); 1912 GFP_KERNEL);
@@ -2196,11 +2126,73 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
2196 return 0; 2126 return 0;
2197} 2127}
2198 2128
2129#ifdef CONFIG_PM
2130static int davinci_mcasp_runtime_suspend(struct device *dev)
2131{
2132 struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
2133 struct davinci_mcasp_context *context = &mcasp->context;
2134 u32 reg;
2135 int i;
2136
2137 for (i = 0; i < ARRAY_SIZE(context_regs); i++)
2138 context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
2139
2140 if (mcasp->txnumevt) {
2141 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
2142 context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
2143 }
2144 if (mcasp->rxnumevt) {
2145 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
2146 context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
2147 }
2148
2149 for (i = 0; i < mcasp->num_serializer; i++)
2150 context->xrsr_regs[i] = mcasp_get_reg(mcasp,
2151 DAVINCI_MCASP_XRSRCTL_REG(i));
2152
2153 return 0;
2154}
2155
2156static int davinci_mcasp_runtime_resume(struct device *dev)
2157{
2158 struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
2159 struct davinci_mcasp_context *context = &mcasp->context;
2160 u32 reg;
2161 int i;
2162
2163 for (i = 0; i < ARRAY_SIZE(context_regs); i++)
2164 mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
2165
2166 if (mcasp->txnumevt) {
2167 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
2168 mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
2169 }
2170 if (mcasp->rxnumevt) {
2171 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
2172 mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
2173 }
2174
2175 for (i = 0; i < mcasp->num_serializer; i++)
2176 mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
2177 context->xrsr_regs[i]);
2178
2179 return 0;
2180}
2181
2182#endif
2183
2184static const struct dev_pm_ops davinci_mcasp_pm_ops = {
2185 SET_RUNTIME_PM_OPS(davinci_mcasp_runtime_suspend,
2186 davinci_mcasp_runtime_resume,
2187 NULL)
2188};
2189
2199static struct platform_driver davinci_mcasp_driver = { 2190static struct platform_driver davinci_mcasp_driver = {
2200 .probe = davinci_mcasp_probe, 2191 .probe = davinci_mcasp_probe,
2201 .remove = davinci_mcasp_remove, 2192 .remove = davinci_mcasp_remove,
2202 .driver = { 2193 .driver = {
2203 .name = "davinci-mcasp", 2194 .name = "davinci-mcasp",
2195 .pm = &davinci_mcasp_pm_ops,
2204 .of_match_table = mcasp_dt_ids, 2196 .of_match_table = mcasp_dt_ids,
2205 }, 2197 },
2206}; 2198};
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig
index 25e287feb58c..723a583a8d57 100644
--- a/sound/soc/xilinx/Kconfig
+++ b/sound/soc/xilinx/Kconfig
@@ -1,5 +1,5 @@
1config SND_SOC_XILINX_I2S 1config SND_SOC_XILINX_I2S
2 tristate "Audio support for the the Xilinx I2S" 2 tristate "Audio support for the Xilinx I2S"
3 help 3 help
4 Select this option to enable Xilinx I2S Audio. This enables 4 Select this option to enable Xilinx I2S Audio. This enables
5 I2S playback and capture using xilinx soft IP. In transmitter 5 I2S playback and capture using xilinx soft IP. In transmitter
diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c
index d4ae9eff41ce..8b353166ad44 100644
--- a/sound/soc/xilinx/xlnx_i2s.c
+++ b/sound/soc/xilinx/xlnx_i2s.c
@@ -1,12 +1,11 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2//
3 * Xilinx ASoC I2S audio support 3// Xilinx ASoC I2S audio support
4 * 4//
5 * Copyright (C) 2018 Xilinx, Inc. 5// Copyright (C) 2018 Xilinx, Inc.
6 * 6//
7 * Author: Praveen Vuppala <praveenv@xilinx.com> 7// Author: Praveen Vuppala <praveenv@xilinx.com>
8 * Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com> 8// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
9 */
10 9
11#include <linux/io.h> 10#include <linux/io.h>
12#include <linux/module.h> 11#include <linux/module.h>
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 7609eceba1a2..9e71d7cda999 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2541,8 +2541,8 @@ static int snd_dbri_create(struct snd_card *card,
2541 dbri->op = op; 2541 dbri->op = op;
2542 dbri->irq = irq; 2542 dbri->irq = irq;
2543 2543
2544 dbri->dma = dma_zalloc_coherent(&op->dev, sizeof(struct dbri_dma), 2544 dbri->dma = dma_alloc_coherent(&op->dev, sizeof(struct dbri_dma),
2545 &dbri->dma_dvma, GFP_KERNEL); 2545 &dbri->dma_dvma, GFP_KERNEL);
2546 if (!dbri->dma) 2546 if (!dbri->dma)
2547 return -ENOMEM; 2547 return -ENOMEM;
2548 2548
diff --git a/sound/usb/card.c b/sound/usb/card.c
index a105947eaf55..746a72e23cf9 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -246,7 +246,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
246 h1 = snd_usb_find_csint_desc(host_iface->extra, 246 h1 = snd_usb_find_csint_desc(host_iface->extra,
247 host_iface->extralen, 247 host_iface->extralen,
248 NULL, UAC_HEADER); 248 NULL, UAC_HEADER);
249 if (!h1) { 249 if (!h1 || h1->bLength < sizeof(*h1)) {
250 dev_err(&dev->dev, "cannot find UAC_HEADER\n"); 250 dev_err(&dev->dev, "cannot find UAC_HEADER\n");
251 return -EINVAL; 251 return -EINVAL;
252 } 252 }
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c63c84b54969..e7d441d0e839 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -753,8 +753,9 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
753 struct uac_mixer_unit_descriptor *desc) 753 struct uac_mixer_unit_descriptor *desc)
754{ 754{
755 int mu_channels; 755 int mu_channels;
756 void *c;
756 757
757 if (desc->bLength < 11) 758 if (desc->bLength < sizeof(*desc))
758 return -EINVAL; 759 return -EINVAL;
759 if (!desc->bNrInPins) 760 if (!desc->bNrInPins)
760 return -EINVAL; 761 return -EINVAL;
@@ -763,6 +764,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
763 case UAC_VERSION_1: 764 case UAC_VERSION_1:
764 case UAC_VERSION_2: 765 case UAC_VERSION_2:
765 default: 766 default:
767 if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
768 return 0; /* no bmControls -> skip */
766 mu_channels = uac_mixer_unit_bNrChannels(desc); 769 mu_channels = uac_mixer_unit_bNrChannels(desc);
767 break; 770 break;
768 case UAC_VERSION_3: 771 case UAC_VERSION_3:
@@ -772,7 +775,11 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
772 } 775 }
773 776
774 if (!mu_channels) 777 if (!mu_channels)
775 return -EINVAL; 778 return 0;
779
780 c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
781 if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
782 return 0; /* no bmControls -> skip */
776 783
777 return mu_channels; 784 return mu_channels;
778} 785}
@@ -944,7 +951,7 @@ static int check_input_term(struct mixer_build *state, int id,
944 struct uac_mixer_unit_descriptor *d = p1; 951 struct uac_mixer_unit_descriptor *d = p1;
945 952
946 err = uac_mixer_unit_get_channels(state, d); 953 err = uac_mixer_unit_get_channels(state, d);
947 if (err < 0) 954 if (err <= 0)
948 return err; 955 return err;
949 956
950 term->channels = err; 957 term->channels = err;
@@ -2068,11 +2075,15 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
2068 2075
2069 if (state->mixer->protocol == UAC_VERSION_2) { 2076 if (state->mixer->protocol == UAC_VERSION_2) {
2070 struct uac2_input_terminal_descriptor *d_v2 = raw_desc; 2077 struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
2078 if (d_v2->bLength < sizeof(*d_v2))
2079 return -EINVAL;
2071 control = UAC2_TE_CONNECTOR; 2080 control = UAC2_TE_CONNECTOR;
2072 term_id = d_v2->bTerminalID; 2081 term_id = d_v2->bTerminalID;
2073 bmctls = le16_to_cpu(d_v2->bmControls); 2082 bmctls = le16_to_cpu(d_v2->bmControls);
2074 } else if (state->mixer->protocol == UAC_VERSION_3) { 2083 } else if (state->mixer->protocol == UAC_VERSION_3) {
2075 struct uac3_input_terminal_descriptor *d_v3 = raw_desc; 2084 struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
2085 if (d_v3->bLength < sizeof(*d_v3))
2086 return -EINVAL;
2076 control = UAC3_TE_INSERTION; 2087 control = UAC3_TE_INSERTION;
2077 term_id = d_v3->bTerminalID; 2088 term_id = d_v3->bTerminalID;
2078 bmctls = le32_to_cpu(d_v3->bmControls); 2089 bmctls = le32_to_cpu(d_v3->bmControls);
@@ -2118,7 +2129,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
2118 if (err < 0) 2129 if (err < 0)
2119 continue; 2130 continue;
2120 /* no bmControls field (e.g. Maya44) -> ignore */ 2131 /* no bmControls field (e.g. Maya44) -> ignore */
2121 if (desc->bLength <= 10 + input_pins) 2132 if (!num_outs)
2122 continue; 2133 continue;
2123 err = check_input_term(state, desc->baSourceID[pin], &iterm); 2134 err = check_input_term(state, desc->baSourceID[pin], &iterm);
2124 if (err < 0) 2135 if (err < 0)
@@ -2314,7 +2325,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
2314 char *name) 2325 char *name)
2315{ 2326{
2316 struct uac_processing_unit_descriptor *desc = raw_desc; 2327 struct uac_processing_unit_descriptor *desc = raw_desc;
2317 int num_ins = desc->bNrInPins; 2328 int num_ins;
2318 struct usb_mixer_elem_info *cval; 2329 struct usb_mixer_elem_info *cval;
2319 struct snd_kcontrol *kctl; 2330 struct snd_kcontrol *kctl;
2320 int i, err, nameid, type, len; 2331 int i, err, nameid, type, len;
@@ -2329,7 +2340,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
2329 0, NULL, default_value_info 2340 0, NULL, default_value_info
2330 }; 2341 };
2331 2342
2332 if (desc->bLength < 13 || desc->bLength < 13 + num_ins || 2343 if (desc->bLength < 13) {
2344 usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
2345 return -EINVAL;
2346 }
2347
2348 num_ins = desc->bNrInPins;
2349 if (desc->bLength < 13 + num_ins ||
2333 desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) { 2350 desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
2334 usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid); 2351 usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
2335 return -EINVAL; 2352 return -EINVAL;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 382847154227..db114f3977e0 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -314,6 +314,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
314 return 0; 314 return 0;
315} 315}
316 316
317/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
318 * applies. Returns 1 if a quirk was found.
319 */
317static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, 320static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
318 struct usb_device *dev, 321 struct usb_device *dev,
319 struct usb_interface_descriptor *altsd, 322 struct usb_interface_descriptor *altsd,
@@ -384,7 +387,7 @@ add_sync_ep:
384 387
385 subs->data_endpoint->sync_master = subs->sync_endpoint; 388 subs->data_endpoint->sync_master = subs->sync_endpoint;
386 389
387 return 0; 390 return 1;
388} 391}
389 392
390static int set_sync_endpoint(struct snd_usb_substream *subs, 393static int set_sync_endpoint(struct snd_usb_substream *subs,
@@ -423,6 +426,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
423 if (err < 0) 426 if (err < 0)
424 return err; 427 return err;
425 428
429 /* endpoint set by quirk */
430 if (err > 0)
431 return 0;
432
426 if (altsd->bNumEndpoints < 2) 433 if (altsd->bNumEndpoints < 2)
427 return 0; 434 return 0;
428 435
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 37fc0447c071..b345beb447bd 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3326,6 +3326,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
3326 } 3326 }
3327 } 3327 }
3328 }, 3328 },
3329 {
3330 .ifnum = -1
3331 },
3329 } 3332 }
3330 } 3333 }
3331}, 3334},
@@ -3369,6 +3372,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
3369 } 3372 }
3370 } 3373 }
3371 }, 3374 },
3375 {
3376 .ifnum = -1
3377 },
3372 } 3378 }
3373 } 3379 }
3374}, 3380},
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 96340f23f86d..7e65fe853ee3 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -768,7 +768,7 @@ static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
768 * REG1: PLL binary search enable, soft mute enable. 768 * REG1: PLL binary search enable, soft mute enable.
769 */ 769 */
770 CM6206_REG1_PLLBIN_EN | 770 CM6206_REG1_PLLBIN_EN |
771 CM6206_REG1_SOFT_MUTE_EN | 771 CM6206_REG1_SOFT_MUTE_EN,
772 /* 772 /*
773 * REG2: enable output drivers, 773 * REG2: enable output drivers,
774 * select front channels to the headphone output, 774 * select front channels to the headphone output,
@@ -1492,6 +1492,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1492 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1492 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1493 break; 1493 break;
1494 1494
1495 case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
1495 case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */ 1496 case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
1496 case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ 1497 case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
1497 case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ 1498 case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
@@ -1566,6 +1567,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1566 case 0x20b1: /* XMOS based devices */ 1567 case 0x20b1: /* XMOS based devices */
1567 case 0x152a: /* Thesycon devices */ 1568 case 0x152a: /* Thesycon devices */
1568 case 0x25ce: /* Mytek devices */ 1569 case 0x25ce: /* Mytek devices */
1570 case 0x2ab6: /* T+A devices */
1569 if (fp->dsd_raw) 1571 if (fp->dsd_raw)
1570 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1572 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1571 break; 1573 break;
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 67cf849aa16b..d9e3de495c16 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -596,12 +596,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
596 csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT); 596 csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
597 597
598 if (!csep || csep->bLength < 7 || 598 if (!csep || csep->bLength < 7 ||
599 csep->bDescriptorSubtype != UAC_EP_GENERAL) { 599 csep->bDescriptorSubtype != UAC_EP_GENERAL)
600 usb_audio_warn(chip, 600 goto error;
601 "%u:%d : no or invalid class specific endpoint descriptor\n",
602 iface_no, altsd->bAlternateSetting);
603 return 0;
604 }
605 601
606 if (protocol == UAC_VERSION_1) { 602 if (protocol == UAC_VERSION_1) {
607 attributes = csep->bmAttributes; 603 attributes = csep->bmAttributes;
@@ -609,6 +605,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
609 struct uac2_iso_endpoint_descriptor *csep2 = 605 struct uac2_iso_endpoint_descriptor *csep2 =
610 (struct uac2_iso_endpoint_descriptor *) csep; 606 (struct uac2_iso_endpoint_descriptor *) csep;
611 607
608 if (csep2->bLength < sizeof(*csep2))
609 goto error;
612 attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX; 610 attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
613 611
614 /* emulate the endpoint attributes of a v1 device */ 612 /* emulate the endpoint attributes of a v1 device */
@@ -618,12 +616,20 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
618 struct uac3_iso_endpoint_descriptor *csep3 = 616 struct uac3_iso_endpoint_descriptor *csep3 =
619 (struct uac3_iso_endpoint_descriptor *) csep; 617 (struct uac3_iso_endpoint_descriptor *) csep;
620 618
619 if (csep3->bLength < sizeof(*csep3))
620 goto error;
621 /* emulate the endpoint attributes of a v1 device */ 621 /* emulate the endpoint attributes of a v1 device */
622 if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH) 622 if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
623 attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; 623 attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
624 } 624 }
625 625
626 return attributes; 626 return attributes;
627
628 error:
629 usb_audio_warn(chip,
630 "%u:%d : no or invalid class specific endpoint descriptor\n",
631 iface_no, altsd->bAlternateSetting);
632 return 0;
627} 633}
628 634
629/* find an input terminal descriptor (either UAC1 or UAC2) with the given 635/* find an input terminal descriptor (either UAC1 or UAC2) with the given
@@ -631,13 +637,17 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
631 */ 637 */
632static void * 638static void *
633snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface, 639snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
634 int terminal_id) 640 int terminal_id, bool uac23)
635{ 641{
636 struct uac2_input_terminal_descriptor *term = NULL; 642 struct uac2_input_terminal_descriptor *term = NULL;
643 size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
644 sizeof(struct uac_input_terminal_descriptor);
637 645
638 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, 646 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
639 ctrl_iface->extralen, 647 ctrl_iface->extralen,
640 term, UAC_INPUT_TERMINAL))) { 648 term, UAC_INPUT_TERMINAL))) {
649 if (term->bLength < minlen)
650 continue;
641 if (term->bTerminalID == terminal_id) 651 if (term->bTerminalID == terminal_id)
642 return term; 652 return term;
643 } 653 }
@@ -655,7 +665,8 @@ snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface,
655 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, 665 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
656 ctrl_iface->extralen, 666 ctrl_iface->extralen,
657 term, UAC_OUTPUT_TERMINAL))) { 667 term, UAC_OUTPUT_TERMINAL))) {
658 if (term->bTerminalID == terminal_id) 668 if (term->bLength >= sizeof(*term) &&
669 term->bTerminalID == terminal_id)
659 return term; 670 return term;
660 } 671 }
661 672
@@ -729,7 +740,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
729 format = le16_to_cpu(as->wFormatTag); /* remember the format value */ 740 format = le16_to_cpu(as->wFormatTag); /* remember the format value */
730 741
731 iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, 742 iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
732 as->bTerminalLink); 743 as->bTerminalLink,
744 false);
733 if (iterm) { 745 if (iterm) {
734 num_channels = iterm->bNrChannels; 746 num_channels = iterm->bNrChannels;
735 chconfig = le16_to_cpu(iterm->wChannelConfig); 747 chconfig = le16_to_cpu(iterm->wChannelConfig);
@@ -764,7 +776,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
764 * to extract the clock 776 * to extract the clock
765 */ 777 */
766 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, 778 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
767 as->bTerminalLink); 779 as->bTerminalLink,
780 true);
768 if (input_term) { 781 if (input_term) {
769 clock = input_term->bCSourceID; 782 clock = input_term->bCSourceID;
770 if (!chconfig && (num_channels == input_term->bNrChannels)) 783 if (!chconfig && (num_channels == input_term->bNrChannels))
@@ -998,7 +1011,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
998 * to extract the clock 1011 * to extract the clock
999 */ 1012 */
1000 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, 1013 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
1001 as->bTerminalLink); 1014 as->bTerminalLink,
1015 true);
1002 if (input_term) { 1016 if (input_term) {
1003 clock = input_term->bCSourceID; 1017 clock = input_term->bCSourceID;
1004 goto found_clock; 1018 goto found_clock;
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
47 PERF_REG_POWERPC_DAR, 47 PERF_REG_POWERPC_DAR,
48 PERF_REG_POWERPC_DSISR, 48 PERF_REG_POWERPC_DSISR,
49 PERF_REG_POWERPC_SIER, 49 PERF_REG_POWERPC_SIER,
50 PERF_REG_POWERPC_MMCRA,
50 PERF_REG_POWERPC_MAX, 51 PERF_REG_POWERPC_MAX,
51}; 52};
52#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ 53#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h
deleted file mode 100644
index 985534d0b448..000000000000
--- a/tools/arch/powerpc/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,404 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
2/*
3 * This file contains the system call numbers.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10#ifndef _UAPI_ASM_POWERPC_UNISTD_H_
11#define _UAPI_ASM_POWERPC_UNISTD_H_
12
13
14#define __NR_restart_syscall 0
15#define __NR_exit 1
16#define __NR_fork 2
17#define __NR_read 3
18#define __NR_write 4
19#define __NR_open 5
20#define __NR_close 6
21#define __NR_waitpid 7
22#define __NR_creat 8
23#define __NR_link 9
24#define __NR_unlink 10
25#define __NR_execve 11
26#define __NR_chdir 12
27#define __NR_time 13
28#define __NR_mknod 14
29#define __NR_chmod 15
30#define __NR_lchown 16
31#define __NR_break 17
32#define __NR_oldstat 18
33#define __NR_lseek 19
34#define __NR_getpid 20
35#define __NR_mount 21
36#define __NR_umount 22
37#define __NR_setuid 23
38#define __NR_getuid 24
39#define __NR_stime 25
40#define __NR_ptrace 26
41#define __NR_alarm 27
42#define __NR_oldfstat 28
43#define __NR_pause 29
44#define __NR_utime 30
45#define __NR_stty 31
46#define __NR_gtty 32
47#define __NR_access 33
48#define __NR_nice 34
49#define __NR_ftime 35
50#define __NR_sync 36
51#define __NR_kill 37
52#define __NR_rename 38
53#define __NR_mkdir 39
54#define __NR_rmdir 40
55#define __NR_dup 41
56#define __NR_pipe 42
57#define __NR_times 43
58#define __NR_prof 44
59#define __NR_brk 45
60#define __NR_setgid 46
61#define __NR_getgid 47
62#define __NR_signal 48
63#define __NR_geteuid 49
64#define __NR_getegid 50
65#define __NR_acct 51
66#define __NR_umount2 52
67#define __NR_lock 53
68#define __NR_ioctl 54
69#define __NR_fcntl 55
70#define __NR_mpx 56
71#define __NR_setpgid 57
72#define __NR_ulimit 58
73#define __NR_oldolduname 59
74#define __NR_umask 60
75#define __NR_chroot 61
76#define __NR_ustat 62
77#define __NR_dup2 63
78#define __NR_getppid 64
79#define __NR_getpgrp 65
80#define __NR_setsid 66
81#define __NR_sigaction 67
82#define __NR_sgetmask 68
83#define __NR_ssetmask 69
84#define __NR_setreuid 70
85#define __NR_setregid 71
86#define __NR_sigsuspend 72
87#define __NR_sigpending 73
88#define __NR_sethostname 74
89#define __NR_setrlimit 75
90#define __NR_getrlimit 76
91#define __NR_getrusage 77
92#define __NR_gettimeofday 78
93#define __NR_settimeofday 79
94#define __NR_getgroups 80
95#define __NR_setgroups 81
96#define __NR_select 82
97#define __NR_symlink 83
98#define __NR_oldlstat 84
99#define __NR_readlink 85
100#define __NR_uselib 86
101#define __NR_swapon 87
102#define __NR_reboot 88
103#define __NR_readdir 89
104#define __NR_mmap 90
105#define __NR_munmap 91
106#define __NR_truncate 92
107#define __NR_ftruncate 93
108#define __NR_fchmod 94
109#define __NR_fchown 95
110#define __NR_getpriority 96
111#define __NR_setpriority 97
112#define __NR_profil 98
113#define __NR_statfs 99
114#define __NR_fstatfs 100
115#define __NR_ioperm 101
116#define __NR_socketcall 102
117#define __NR_syslog 103
118#define __NR_setitimer 104
119#define __NR_getitimer 105
120#define __NR_stat 106
121#define __NR_lstat 107
122#define __NR_fstat 108
123#define __NR_olduname 109
124#define __NR_iopl 110
125#define __NR_vhangup 111
126#define __NR_idle 112
127#define __NR_vm86 113
128#define __NR_wait4 114
129#define __NR_swapoff 115
130#define __NR_sysinfo 116
131#define __NR_ipc 117
132#define __NR_fsync 118
133#define __NR_sigreturn 119
134#define __NR_clone 120
135#define __NR_setdomainname 121
136#define __NR_uname 122
137#define __NR_modify_ldt 123
138#define __NR_adjtimex 124
139#define __NR_mprotect 125
140#define __NR_sigprocmask 126
141#define __NR_create_module 127
142#define __NR_init_module 128
143#define __NR_delete_module 129
144#define __NR_get_kernel_syms 130
145#define __NR_quotactl 131
146#define __NR_getpgid 132
147#define __NR_fchdir 133
148#define __NR_bdflush 134
149#define __NR_sysfs 135
150#define __NR_personality 136
151#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
152#define __NR_setfsuid 138
153#define __NR_setfsgid 139
154#define __NR__llseek 140
155#define __NR_getdents 141
156#define __NR__newselect 142
157#define __NR_flock 143
158#define __NR_msync 144
159#define __NR_readv 145
160#define __NR_writev 146
161#define __NR_getsid 147
162#define __NR_fdatasync 148
163#define __NR__sysctl 149
164#define __NR_mlock 150
165#define __NR_munlock 151
166#define __NR_mlockall 152
167#define __NR_munlockall 153
168#define __NR_sched_setparam 154
169#define __NR_sched_getparam 155
170#define __NR_sched_setscheduler 156
171#define __NR_sched_getscheduler 157
172#define __NR_sched_yield 158
173#define __NR_sched_get_priority_max 159
174#define __NR_sched_get_priority_min 160
175#define __NR_sched_rr_get_interval 161
176#define __NR_nanosleep 162
177#define __NR_mremap 163
178#define __NR_setresuid 164
179#define __NR_getresuid 165
180#define __NR_query_module 166
181#define __NR_poll 167
182#define __NR_nfsservctl 168
183#define __NR_setresgid 169
184#define __NR_getresgid 170
185#define __NR_prctl 171
186#define __NR_rt_sigreturn 172
187#define __NR_rt_sigaction 173
188#define __NR_rt_sigprocmask 174
189#define __NR_rt_sigpending 175
190#define __NR_rt_sigtimedwait 176
191#define __NR_rt_sigqueueinfo 177
192#define __NR_rt_sigsuspend 178
193#define __NR_pread64 179
194#define __NR_pwrite64 180
195#define __NR_chown 181
196#define __NR_getcwd 182
197#define __NR_capget 183
198#define __NR_capset 184
199#define __NR_sigaltstack 185
200#define __NR_sendfile 186
201#define __NR_getpmsg 187 /* some people actually want streams */
202#define __NR_putpmsg 188 /* some people actually want streams */
203#define __NR_vfork 189
204#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
205#define __NR_readahead 191
206#ifndef __powerpc64__ /* these are 32-bit only */
207#define __NR_mmap2 192
208#define __NR_truncate64 193
209#define __NR_ftruncate64 194
210#define __NR_stat64 195
211#define __NR_lstat64 196
212#define __NR_fstat64 197
213#endif
214#define __NR_pciconfig_read 198
215#define __NR_pciconfig_write 199
216#define __NR_pciconfig_iobase 200
217#define __NR_multiplexer 201
218#define __NR_getdents64 202
219#define __NR_pivot_root 203
220#ifndef __powerpc64__
221#define __NR_fcntl64 204
222#endif
223#define __NR_madvise 205
224#define __NR_mincore 206
225#define __NR_gettid 207
226#define __NR_tkill 208
227#define __NR_setxattr 209
228#define __NR_lsetxattr 210
229#define __NR_fsetxattr 211
230#define __NR_getxattr 212
231#define __NR_lgetxattr 213
232#define __NR_fgetxattr 214
233#define __NR_listxattr 215
234#define __NR_llistxattr 216
235#define __NR_flistxattr 217
236#define __NR_removexattr 218
237#define __NR_lremovexattr 219
238#define __NR_fremovexattr 220
239#define __NR_futex 221
240#define __NR_sched_setaffinity 222
241#define __NR_sched_getaffinity 223
242/* 224 currently unused */
243#define __NR_tuxcall 225
244#ifndef __powerpc64__
245#define __NR_sendfile64 226
246#endif
247#define __NR_io_setup 227
248#define __NR_io_destroy 228
249#define __NR_io_getevents 229
250#define __NR_io_submit 230
251#define __NR_io_cancel 231
252#define __NR_set_tid_address 232
253#define __NR_fadvise64 233
254#define __NR_exit_group 234
255#define __NR_lookup_dcookie 235
256#define __NR_epoll_create 236
257#define __NR_epoll_ctl 237
258#define __NR_epoll_wait 238
259#define __NR_remap_file_pages 239
260#define __NR_timer_create 240
261#define __NR_timer_settime 241
262#define __NR_timer_gettime 242
263#define __NR_timer_getoverrun 243
264#define __NR_timer_delete 244
265#define __NR_clock_settime 245
266#define __NR_clock_gettime 246
267#define __NR_clock_getres 247
268#define __NR_clock_nanosleep 248
269#define __NR_swapcontext 249
270#define __NR_tgkill 250
271#define __NR_utimes 251
272#define __NR_statfs64 252
273#define __NR_fstatfs64 253
274#ifndef __powerpc64__
275#define __NR_fadvise64_64 254
276#endif
277#define __NR_rtas 255
278#define __NR_sys_debug_setcontext 256
279/* Number 257 is reserved for vserver */
280#define __NR_migrate_pages 258
281#define __NR_mbind 259
282#define __NR_get_mempolicy 260
283#define __NR_set_mempolicy 261
284#define __NR_mq_open 262
285#define __NR_mq_unlink 263
286#define __NR_mq_timedsend 264
287#define __NR_mq_timedreceive 265
288#define __NR_mq_notify 266
289#define __NR_mq_getsetattr 267
290#define __NR_kexec_load 268
291#define __NR_add_key 269
292#define __NR_request_key 270
293#define __NR_keyctl 271
294#define __NR_waitid 272
295#define __NR_ioprio_set 273
296#define __NR_ioprio_get 274
297#define __NR_inotify_init 275
298#define __NR_inotify_add_watch 276
299#define __NR_inotify_rm_watch 277
300#define __NR_spu_run 278
301#define __NR_spu_create 279
302#define __NR_pselect6 280
303#define __NR_ppoll 281
304#define __NR_unshare 282
305#define __NR_splice 283
306#define __NR_tee 284
307#define __NR_vmsplice 285
308#define __NR_openat 286
309#define __NR_mkdirat 287
310#define __NR_mknodat 288
311#define __NR_fchownat 289
312#define __NR_futimesat 290
313#ifdef __powerpc64__
314#define __NR_newfstatat 291
315#else
316#define __NR_fstatat64 291
317#endif
318#define __NR_unlinkat 292
319#define __NR_renameat 293
320#define __NR_linkat 294
321#define __NR_symlinkat 295
322#define __NR_readlinkat 296
323#define __NR_fchmodat 297
324#define __NR_faccessat 298
325#define __NR_get_robust_list 299
326#define __NR_set_robust_list 300
327#define __NR_move_pages 301
328#define __NR_getcpu 302
329#define __NR_epoll_pwait 303
330#define __NR_utimensat 304
331#define __NR_signalfd 305
332#define __NR_timerfd_create 306
333#define __NR_eventfd 307
334#define __NR_sync_file_range2 308
335#define __NR_fallocate 309
336#define __NR_subpage_prot 310
337#define __NR_timerfd_settime 311
338#define __NR_timerfd_gettime 312
339#define __NR_signalfd4 313
340#define __NR_eventfd2 314
341#define __NR_epoll_create1 315
342#define __NR_dup3 316
343#define __NR_pipe2 317
344#define __NR_inotify_init1 318
345#define __NR_perf_event_open 319
346#define __NR_preadv 320
347#define __NR_pwritev 321
348#define __NR_rt_tgsigqueueinfo 322
349#define __NR_fanotify_init 323
350#define __NR_fanotify_mark 324
351#define __NR_prlimit64 325
352#define __NR_socket 326
353#define __NR_bind 327
354#define __NR_connect 328
355#define __NR_listen 329
356#define __NR_accept 330
357#define __NR_getsockname 331
358#define __NR_getpeername 332
359#define __NR_socketpair 333
360#define __NR_send 334
361#define __NR_sendto 335
362#define __NR_recv 336
363#define __NR_recvfrom 337
364#define __NR_shutdown 338
365#define __NR_setsockopt 339
366#define __NR_getsockopt 340
367#define __NR_sendmsg 341
368#define __NR_recvmsg 342
369#define __NR_recvmmsg 343
370#define __NR_accept4 344
371#define __NR_name_to_handle_at 345
372#define __NR_open_by_handle_at 346
373#define __NR_clock_adjtime 347
374#define __NR_syncfs 348
375#define __NR_sendmmsg 349
376#define __NR_setns 350
377#define __NR_process_vm_readv 351
378#define __NR_process_vm_writev 352
379#define __NR_finit_module 353
380#define __NR_kcmp 354
381#define __NR_sched_setattr 355
382#define __NR_sched_getattr 356
383#define __NR_renameat2 357
384#define __NR_seccomp 358
385#define __NR_getrandom 359
386#define __NR_memfd_create 360
387#define __NR_bpf 361
388#define __NR_execveat 362
389#define __NR_switch_endian 363
390#define __NR_userfaultfd 364
391#define __NR_membarrier 365
392#define __NR_mlock2 378
393#define __NR_copy_file_range 379
394#define __NR_preadv2 380
395#define __NR_pwritev2 381
396#define __NR_kexec_file_load 382
397#define __NR_statx 383
398#define __NR_pkey_alloc 384
399#define __NR_pkey_free 385
400#define __NR_pkey_mprotect 386
401#define __NR_rseq 387
402#define __NR_io_pgetevents 388
403
404#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..0b3cb52fd29d
--- /dev/null
+++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Copyright (C) 2015 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
19#define _UAPI_ASM_RISCV_BITSPERLONG_H
20
21#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
22
23#include <asm-generic/bitsperlong.h>
24
25#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 28c4a502b419..6d6122524711 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -281,9 +281,11 @@
281#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ 281#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
282#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ 282#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
283#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ 283#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
284#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
284#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ 285#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
285#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ 286#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
286#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ 287#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
288#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
287#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ 289#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
288#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ 290#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
289#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ 291#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 33833d1909af..a5ea841cc6d2 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) 16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
17#endif 17#endif
18 18
19#ifdef CONFIG_X86_SMAP
20# define DISABLE_SMAP 0
21#else
22# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
23#endif
24
19#ifdef CONFIG_X86_INTEL_UMIP 25#ifdef CONFIG_X86_INTEL_UMIP
20# define DISABLE_UMIP 0 26# define DISABLE_UMIP 0
21#else 27#else
@@ -68,7 +74,7 @@
68#define DISABLED_MASK6 0 74#define DISABLED_MASK6 0
69#define DISABLED_MASK7 (DISABLE_PTI) 75#define DISABLED_MASK7 (DISABLE_PTI)
70#define DISABLED_MASK8 0 76#define DISABLED_MASK8 0
71#define DISABLED_MASK9 (DISABLE_MPX) 77#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP)
72#define DISABLED_MASK10 0 78#define DISABLED_MASK10 0
73#define DISABLED_MASK11 0 79#define DISABLED_MASK11 0
74#define DISABLED_MASK12 0 80#define DISABLED_MASK12 0
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 492f0f24e2d3..4ad1f0894d53 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c
93SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c)) 93SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
94 94
95ifeq ($(feature-libbfd),1) 95ifeq ($(feature-libbfd),1)
96 LIBS += -lbfd -ldl -lopcodes
97else ifeq ($(feature-libbfd-liberty),1)
98 LIBS += -lbfd -ldl -lopcodes -liberty
99else ifeq ($(feature-libbfd-liberty-z),1)
100 LIBS += -lbfd -ldl -lopcodes -liberty -lz
101endif
102
103ifneq ($(filter -lbfd,$(LIBS)),)
96CFLAGS += -DHAVE_LIBBFD_SUPPORT 104CFLAGS += -DHAVE_LIBBFD_SUPPORT
97SRCS += $(BFD_SRCS) 105SRCS += $(BFD_SRCS)
98LIBS += -lbfd -lopcodes
99endif 106endif
100 107
101OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o 108OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 3f0629edbca5..6ba5f567a9d8 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -82,8 +82,6 @@ static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
82 int bits_to_copy; 82 int bits_to_copy;
83 __u64 print_num; 83 __u64 print_num;
84 84
85 data += BITS_ROUNDDOWN_BYTES(bit_offset);
86 bit_offset = BITS_PER_BYTE_MASKED(bit_offset);
87 bits_to_copy = bit_offset + nr_bits; 85 bits_to_copy = bit_offset + nr_bits;
88 bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); 86 bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
89 87
@@ -118,7 +116,9 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
118 * BTF_INT_OFFSET() cannot exceed 64 bits. 116 * BTF_INT_OFFSET() cannot exceed 64 bits.
119 */ 117 */
120 total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); 118 total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
121 btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw, 119 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
120 bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
121 btf_dumper_bitfield(nr_bits, bit_offset, data, jw,
122 is_plain_text); 122 is_plain_text);
123} 123}
124 124
@@ -216,11 +216,12 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
216 } 216 }
217 217
218 jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); 218 jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
219 data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
219 if (bitfield_size) { 220 if (bitfield_size) {
220 btf_dumper_bitfield(bitfield_size, bit_offset, 221 btf_dumper_bitfield(bitfield_size,
221 data, d->jw, d->is_plain_text); 222 BITS_PER_BYTE_MASKED(bit_offset),
223 data_off, d->jw, d->is_plain_text);
222 } else { 224 } else {
223 data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
224 ret = btf_dumper_do_type(d, m[i].type, 225 ret = btf_dumper_do_type(d, m[i].type,
225 BITS_PER_BYTE_MASKED(bit_offset), 226 BITS_PER_BYTE_MASKED(bit_offset),
226 data_off); 227 data_off);
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 897483457bf0..f7261fad45c1 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key)
297 snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); 297 snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
298 298
299 fdi = fopen(path, "r"); 299 fdi = fopen(path, "r");
300 if (!fdi) { 300 if (!fdi)
301 p_err("can't open fdinfo: %s", strerror(errno));
302 return NULL; 301 return NULL;
303 }
304 302
305 while ((n = getline(&line, &line_n, fdi)) > 0) { 303 while ((n = getline(&line, &line_n, fdi)) > 0) {
306 char *value; 304 char *value;
@@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key)
313 311
314 value = strchr(line, '\t'); 312 value = strchr(line, '\t');
315 if (!value || !value[1]) { 313 if (!value || !value[1]) {
316 p_err("malformed fdinfo!?");
317 free(line); 314 free(line);
318 return NULL; 315 return NULL;
319 } 316 }
@@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key)
326 return line; 323 return line;
327 } 324 }
328 325
329 p_err("key '%s' not found in fdinfo", key);
330 free(line); 326 free(line);
331 fclose(fdi); 327 fclose(fdi);
332 return NULL; 328 return NULL;
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index bff7ee026680..6046dcab51cc 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -1,15 +1,10 @@
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 1// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
2/* 2/*
3 * Simple streaming JSON writer 3 * Simple streaming JSON writer
4 * 4 *
5 * This takes care of the annoying bits of JSON syntax like the commas 5 * This takes care of the annoying bits of JSON syntax like the commas
6 * after elements 6 * after elements
7 * 7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Authors: Stephen Hemminger <stephen@networkplumber.org> 8 * Authors: Stephen Hemminger <stephen@networkplumber.org>
14 */ 9 */
15 10
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index c1ab51aed99c..cb9a1993681c 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -5,11 +5,6 @@
5 * This takes care of the annoying bits of JSON syntax like the commas 5 * This takes care of the annoying bits of JSON syntax like the commas
6 * after elements 6 * after elements
7 * 7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Authors: Stephen Hemminger <stephen@networkplumber.org> 8 * Authors: Stephen Hemminger <stephen@networkplumber.org>
14 */ 9 */
15 10
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 2037e3dc864b..1ef1ee2280a2 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -347,6 +347,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
347 return argv + i; 347 return argv + i;
348} 348}
349 349
350/* on per cpu maps we must copy the provided value on all value instances */
351static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
352{
353 unsigned int i, n, step;
354
355 if (!map_is_per_cpu(info->type))
356 return;
357
358 n = get_possible_cpus();
359 step = round_up(info->value_size, 8);
360 for (i = 1; i < n; i++)
361 memcpy(value + i * step, value, info->value_size);
362}
363
350static int parse_elem(char **argv, struct bpf_map_info *info, 364static int parse_elem(char **argv, struct bpf_map_info *info,
351 void *key, void *value, __u32 key_size, __u32 value_size, 365 void *key, void *value, __u32 key_size, __u32 value_size,
352 __u32 *flags, __u32 **value_fd) 366 __u32 *flags, __u32 **value_fd)
@@ -426,6 +440,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
426 argv = parse_bytes(argv, "value", value, value_size); 440 argv = parse_bytes(argv, "value", value, value_size);
427 if (!argv) 441 if (!argv)
428 return -1; 442 return -1;
443
444 fill_per_cpu_value(info, value);
429 } 445 }
430 446
431 return parse_elem(argv, info, key, NULL, key_size, value_size, 447 return parse_elem(argv, info, key, NULL, key_size, value_size,
@@ -497,10 +513,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
497 jsonw_uint_field(json_wtr, "owner_prog_type", 513 jsonw_uint_field(json_wtr, "owner_prog_type",
498 prog_type); 514 prog_type);
499 } 515 }
500 if (atoi(owner_jited)) 516 if (owner_jited)
501 jsonw_bool_field(json_wtr, "owner_jited", true); 517 jsonw_bool_field(json_wtr, "owner_jited",
502 else 518 !!atoi(owner_jited));
503 jsonw_bool_field(json_wtr, "owner_jited", false);
504 519
505 free(owner_prog_type); 520 free(owner_prog_type);
506 free(owner_jited); 521 free(owner_jited);
@@ -553,7 +568,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
553 char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); 568 char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
554 char *owner_jited = get_fdinfo(fd, "owner_jited"); 569 char *owner_jited = get_fdinfo(fd, "owner_jited");
555 570
556 printf("\n\t"); 571 if (owner_prog_type || owner_jited)
572 printf("\n\t");
557 if (owner_prog_type) { 573 if (owner_prog_type) {
558 unsigned int prog_type = atoi(owner_prog_type); 574 unsigned int prog_type = atoi(owner_prog_type);
559 575
@@ -563,10 +579,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
563 else 579 else
564 printf("owner_prog_type %d ", prog_type); 580 printf("owner_prog_type %d ", prog_type);
565 } 581 }
566 if (atoi(owner_jited)) 582 if (owner_jited)
567 printf("owner jited"); 583 printf("owner%s jited",
568 else 584 atoi(owner_jited) ? "" : " not");
569 printf("owner not jited");
570 585
571 free(owner_prog_type); 586 free(owner_prog_type);
572 free(owner_jited); 587 free(owner_jited);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 2d1bb7d6ff51..b54ed82b9589 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
78 78
79static int prog_fd_by_tag(unsigned char *tag) 79static int prog_fd_by_tag(unsigned char *tag)
80{ 80{
81 struct bpf_prog_info info = {};
82 __u32 len = sizeof(info);
83 unsigned int id = 0; 81 unsigned int id = 0;
84 int err; 82 int err;
85 int fd; 83 int fd;
86 84
87 while (true) { 85 while (true) {
86 struct bpf_prog_info info = {};
87 __u32 len = sizeof(info);
88
88 err = bpf_prog_get_next_id(id, &id); 89 err = bpf_prog_get_next_id(id, &id);
89 if (err) { 90 if (err) {
90 p_err("%s", strerror(errno)); 91 p_err("%s", strerror(errno));
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 3040830d7797..84545666a09c 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -330,7 +330,7 @@ static const struct option longopts[] = {
330 330
331int main(int argc, char **argv) 331int main(int argc, char **argv)
332{ 332{
333 unsigned long long num_loops = 2; 333 long long num_loops = 2;
334 unsigned long timedelay = 1000000; 334 unsigned long timedelay = 1000000;
335 unsigned long buf_len = 128; 335 unsigned long buf_len = 128;
336 336
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index c7f3321fbe43..d90127298f12 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -738,9 +738,11 @@ __SYSCALL(__NR_statx, sys_statx)
738__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) 738__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
739#define __NR_rseq 293 739#define __NR_rseq 293
740__SYSCALL(__NR_rseq, sys_rseq) 740__SYSCALL(__NR_rseq, sys_rseq)
741#define __NR_kexec_file_load 294
742__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
741 743
742#undef __NR_syscalls 744#undef __NR_syscalls
743#define __NR_syscalls 294 745#define __NR_syscalls 295
744 746
745/* 747/*
746 * 32 bit systems traditionally used different 748 * 32 bit systems traditionally used different
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index 8dd6aefdafa4..57aaeaf8e192 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -13,6 +13,10 @@
13#include "../../arch/mips/include/uapi/asm/bitsperlong.h" 13#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
14#elif defined(__ia64__) 14#elif defined(__ia64__)
15#include "../../arch/ia64/include/uapi/asm/bitsperlong.h" 15#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
16#elif defined(__riscv)
17#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
18#elif defined(__alpha__)
19#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
16#else 20#else
17#include <asm-generic/bitsperlong.h> 21#include <asm-generic/bitsperlong.h>
18#endif 22#endif
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index a4446f452040..298b2e197744 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
412 int irq_seq; 412 int irq_seq;
413} drm_i915_irq_wait_t; 413} drm_i915_irq_wait_t;
414 414
415/*
416 * Different modes of per-process Graphics Translation Table,
417 * see I915_PARAM_HAS_ALIASING_PPGTT
418 */
419#define I915_GEM_PPGTT_NONE 0
420#define I915_GEM_PPGTT_ALIASING 1
421#define I915_GEM_PPGTT_FULL 2
422
415/* Ioctl to query kernel params: 423/* Ioctl to query kernel params:
416 */ 424 */
417#define I915_PARAM_IRQ_ACTIVE 1 425#define I915_PARAM_IRQ_ACTIVE 1
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index a441ea1bfe6d..121e82ce296b 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -14,6 +14,11 @@
14#include <linux/ioctl.h> 14#include <linux/ioctl.h>
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
18#if !defined(__KERNEL__)
19#include <linux/mount.h>
20#endif
21
17/* 22/*
18 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change 23 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
19 * the file limit at runtime and only root can increase the per-process 24 * the file limit at runtime and only root can increase the per-process
@@ -101,57 +106,6 @@ struct inodes_stat_t {
101 106
102#define NR_FILE 8192 /* this can well be larger on a larger system */ 107#define NR_FILE 8192 /* this can well be larger on a larger system */
103 108
104
105/*
106 * These are the fs-independent mount-flags: up to 32 flags are supported
107 */
108#define MS_RDONLY 1 /* Mount read-only */
109#define MS_NOSUID 2 /* Ignore suid and sgid bits */
110#define MS_NODEV 4 /* Disallow access to device special files */
111#define MS_NOEXEC 8 /* Disallow program execution */
112#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
113#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
114#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
115#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
116#define MS_NOATIME 1024 /* Do not update access times. */
117#define MS_NODIRATIME 2048 /* Do not update directory access times */
118#define MS_BIND 4096
119#define MS_MOVE 8192
120#define MS_REC 16384
121#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
122 MS_VERBOSE is deprecated. */
123#define MS_SILENT 32768
124#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
125#define MS_UNBINDABLE (1<<17) /* change to unbindable */
126#define MS_PRIVATE (1<<18) /* change to private */
127#define MS_SLAVE (1<<19) /* change to slave */
128#define MS_SHARED (1<<20) /* change to shared */
129#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
130#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
131#define MS_I_VERSION (1<<23) /* Update inode I_version field */
132#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
133#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
134
135/* These sb flags are internal to the kernel */
136#define MS_SUBMOUNT (1<<26)
137#define MS_NOREMOTELOCK (1<<27)
138#define MS_NOSEC (1<<28)
139#define MS_BORN (1<<29)
140#define MS_ACTIVE (1<<30)
141#define MS_NOUSER (1<<31)
142
143/*
144 * Superblock flags that can be altered by MS_REMOUNT
145 */
146#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
147 MS_LAZYTIME)
148
149/*
150 * Old magic mount flag and mask
151 */
152#define MS_MGC_VAL 0xC0ED0000
153#define MS_MGC_MSK 0xffff0000
154
155/* 109/*
156 * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR. 110 * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR.
157 */ 111 */
@@ -269,7 +223,8 @@ struct fsxattr {
269#define FS_POLICY_FLAGS_PAD_16 0x02 223#define FS_POLICY_FLAGS_PAD_16 0x02
270#define FS_POLICY_FLAGS_PAD_32 0x03 224#define FS_POLICY_FLAGS_PAD_32 0x03
271#define FS_POLICY_FLAGS_PAD_MASK 0x03 225#define FS_POLICY_FLAGS_PAD_MASK 0x03
272#define FS_POLICY_FLAGS_VALID 0x03 226#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */
227#define FS_POLICY_FLAGS_VALID 0x07
273 228
274/* Encryption algorithms */ 229/* Encryption algorithms */
275#define FS_ENCRYPTION_MODE_INVALID 0 230#define FS_ENCRYPTION_MODE_INVALID 0
@@ -281,6 +236,7 @@ struct fsxattr {
281#define FS_ENCRYPTION_MODE_AES_128_CTS 6 236#define FS_ENCRYPTION_MODE_AES_128_CTS 6
282#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */ 237#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
283#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */ 238#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
239#define FS_ENCRYPTION_MODE_ADIANTUM 9
284 240
285struct fscrypt_policy { 241struct fscrypt_policy {
286 __u8 version; 242 __u8 version;
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 1debfa42cba1..d6533828123a 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -288,6 +288,7 @@ enum {
288 IFLA_BR_MCAST_IGMP_VERSION, 288 IFLA_BR_MCAST_IGMP_VERSION,
289 IFLA_BR_MCAST_MLD_VERSION, 289 IFLA_BR_MCAST_MLD_VERSION,
290 IFLA_BR_VLAN_STATS_PER_PORT, 290 IFLA_BR_VLAN_STATS_PER_PORT,
291 IFLA_BR_MULTI_BOOLOPT,
291 __IFLA_BR_MAX, 292 __IFLA_BR_MAX,
292}; 293};
293 294
@@ -533,6 +534,7 @@ enum {
533 IFLA_VXLAN_LABEL, 534 IFLA_VXLAN_LABEL,
534 IFLA_VXLAN_GPE, 535 IFLA_VXLAN_GPE,
535 IFLA_VXLAN_TTL_INHERIT, 536 IFLA_VXLAN_TTL_INHERIT,
537 IFLA_VXLAN_DF,
536 __IFLA_VXLAN_MAX 538 __IFLA_VXLAN_MAX
537}; 539};
538#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) 540#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -542,6 +544,14 @@ struct ifla_vxlan_port_range {
542 __be16 high; 544 __be16 high;
543}; 545};
544 546
547enum ifla_vxlan_df {
548 VXLAN_DF_UNSET = 0,
549 VXLAN_DF_SET,
550 VXLAN_DF_INHERIT,
551 __VXLAN_DF_END,
552 VXLAN_DF_MAX = __VXLAN_DF_END - 1,
553};
554
545/* GENEVE section */ 555/* GENEVE section */
546enum { 556enum {
547 IFLA_GENEVE_UNSPEC, 557 IFLA_GENEVE_UNSPEC,
@@ -557,10 +567,19 @@ enum {
557 IFLA_GENEVE_UDP_ZERO_CSUM6_RX, 567 IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
558 IFLA_GENEVE_LABEL, 568 IFLA_GENEVE_LABEL,
559 IFLA_GENEVE_TTL_INHERIT, 569 IFLA_GENEVE_TTL_INHERIT,
570 IFLA_GENEVE_DF,
560 __IFLA_GENEVE_MAX 571 __IFLA_GENEVE_MAX
561}; 572};
562#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) 573#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
563 574
575enum ifla_geneve_df {
576 GENEVE_DF_UNSET = 0,
577 GENEVE_DF_SET,
578 GENEVE_DF_INHERIT,
579 __GENEVE_DF_END,
580 GENEVE_DF_MAX = __GENEVE_DF_END - 1,
581};
582
564/* PPP section */ 583/* PPP section */
565enum { 584enum {
566 IFLA_PPP_UNSPEC, 585 IFLA_PPP_UNSPEC,
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 48e8a225b985..a55cb8b10165 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -266,10 +266,14 @@ struct sockaddr_in {
266 266
267#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000) 267#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
268#define IN_MULTICAST(a) IN_CLASSD(a) 268#define IN_MULTICAST(a) IN_CLASSD(a)
269#define IN_MULTICAST_NET 0xF0000000 269#define IN_MULTICAST_NET 0xe0000000
270 270
271#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) 271#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
272#define IN_BADCLASS(a) IN_EXPERIMENTAL((a)) 272#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
273
274#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
275#define IN_CLASSE_NET 0xffffffff
276#define IN_CLASSE_NSHIFT 0
273 277
274/* Address to accept any incoming messages. */ 278/* Address to accept any incoming messages. */
275#define INADDR_ANY ((unsigned long int) 0x00000000) 279#define INADDR_ANY ((unsigned long int) 0x00000000)
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 2b7a652c9fa4..6d4ea4b6c922 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -492,6 +492,17 @@ struct kvm_dirty_log {
492 }; 492 };
493}; 493};
494 494
495/* for KVM_CLEAR_DIRTY_LOG */
496struct kvm_clear_dirty_log {
497 __u32 slot;
498 __u32 num_pages;
499 __u64 first_page;
500 union {
501 void __user *dirty_bitmap; /* one bit per page */
502 __u64 padding2;
503 };
504};
505
495/* for KVM_SET_SIGNAL_MASK */ 506/* for KVM_SET_SIGNAL_MASK */
496struct kvm_signal_mask { 507struct kvm_signal_mask {
497 __u32 len; 508 __u32 len;
@@ -975,6 +986,8 @@ struct kvm_ppc_resize_hpt {
975#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 986#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
976#define KVM_CAP_EXCEPTION_PAYLOAD 164 987#define KVM_CAP_EXCEPTION_PAYLOAD 164
977#define KVM_CAP_ARM_VM_IPA_SIZE 165 988#define KVM_CAP_ARM_VM_IPA_SIZE 165
989#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
990#define KVM_CAP_HYPERV_CPUID 167
978 991
979#ifdef KVM_CAP_IRQ_ROUTING 992#ifdef KVM_CAP_IRQ_ROUTING
980 993
@@ -1421,6 +1434,12 @@ struct kvm_enc_region {
1421#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state) 1434#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
1422#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state) 1435#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
1423 1436
1437/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
1438#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
1439
1440/* Available with KVM_CAP_HYPERV_CPUID */
1441#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
1442
1424/* Secure Encrypted Virtualization command */ 1443/* Secure Encrypted Virtualization command */
1425enum sev_cmd_id { 1444enum sev_cmd_id {
1426 /* Guest initialization commands */ 1445 /* Guest initialization commands */
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
new file mode 100644
index 000000000000..3f9ec42510b0
--- /dev/null
+++ b/tools/include/uapi/linux/mount.h
@@ -0,0 +1,58 @@
1#ifndef _UAPI_LINUX_MOUNT_H
2#define _UAPI_LINUX_MOUNT_H
3
4/*
5 * These are the fs-independent mount-flags: up to 32 flags are supported
6 *
7 * Usage of these is restricted within the kernel to core mount(2) code and
8 * callers of sys_mount() only. Filesystems should be using the SB_*
9 * equivalent instead.
10 */
11#define MS_RDONLY 1 /* Mount read-only */
12#define MS_NOSUID 2 /* Ignore suid and sgid bits */
13#define MS_NODEV 4 /* Disallow access to device special files */
14#define MS_NOEXEC 8 /* Disallow program execution */
15#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
16#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
17#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
18#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
19#define MS_NOATIME 1024 /* Do not update access times. */
20#define MS_NODIRATIME 2048 /* Do not update directory access times */
21#define MS_BIND 4096
22#define MS_MOVE 8192
23#define MS_REC 16384
24#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
25 MS_VERBOSE is deprecated. */
26#define MS_SILENT 32768
27#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
28#define MS_UNBINDABLE (1<<17) /* change to unbindable */
29#define MS_PRIVATE (1<<18) /* change to private */
30#define MS_SLAVE (1<<19) /* change to slave */
31#define MS_SHARED (1<<20) /* change to shared */
32#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
33#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
34#define MS_I_VERSION (1<<23) /* Update inode I_version field */
35#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
36#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
37
38/* These sb flags are internal to the kernel */
39#define MS_SUBMOUNT (1<<26)
40#define MS_NOREMOTELOCK (1<<27)
41#define MS_NOSEC (1<<28)
42#define MS_BORN (1<<29)
43#define MS_ACTIVE (1<<30)
44#define MS_NOUSER (1<<31)
45
46/*
47 * Superblock flags that can be altered by MS_REMOUNT
48 */
49#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
50 MS_LAZYTIME)
51
52/*
53 * Old magic mount flag and mask
54 */
55#define MS_MGC_VAL 0xC0ED0000
56#define MS_MGC_MSK 0xffff0000
57
58#endif /* _UAPI_LINUX_MOUNT_H */
diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h
new file mode 100644
index 000000000000..0d18b1d1fbbc
--- /dev/null
+++ b/tools/include/uapi/linux/pkt_sched.h
@@ -0,0 +1,1163 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __LINUX_PKT_SCHED_H
3#define __LINUX_PKT_SCHED_H
4
5#include <linux/types.h>
6
7/* Logical priority bands not depending on specific packet scheduler.
8 Every scheduler will map them to real traffic classes, if it has
9 no more precise mechanism to classify packets.
10
11 These numbers have no special meaning, though their coincidence
12 with obsolete IPv6 values is not occasional :-). New IPv6 drafts
13 preferred full anarchy inspired by diffserv group.
14
15 Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
16 class, actually, as rule it will be handled with more care than
17 filler or even bulk.
18 */
19
20#define TC_PRIO_BESTEFFORT 0
21#define TC_PRIO_FILLER 1
22#define TC_PRIO_BULK 2
23#define TC_PRIO_INTERACTIVE_BULK 4
24#define TC_PRIO_INTERACTIVE 6
25#define TC_PRIO_CONTROL 7
26
27#define TC_PRIO_MAX 15
28
29/* Generic queue statistics, available for all the elements.
30 Particular schedulers may have also their private records.
31 */
32
33struct tc_stats {
34 __u64 bytes; /* Number of enqueued bytes */
35 __u32 packets; /* Number of enqueued packets */
36 __u32 drops; /* Packets dropped because of lack of resources */
37 __u32 overlimits; /* Number of throttle events when this
38 * flow goes out of allocated bandwidth */
39 __u32 bps; /* Current flow byte rate */
40 __u32 pps; /* Current flow packet rate */
41 __u32 qlen;
42 __u32 backlog;
43};
44
45struct tc_estimator {
46 signed char interval;
47 unsigned char ewma_log;
48};
49
50/* "Handles"
51 ---------
52
53 All the traffic control objects have 32bit identifiers, or "handles".
54
55 They can be considered as opaque numbers from user API viewpoint,
56 but actually they always consist of two fields: major and
57 minor numbers, which are interpreted by kernel specially,
58 that may be used by applications, though not recommended.
59
60 F.e. qdisc handles always have minor number equal to zero,
61 classes (or flows) have major equal to parent qdisc major, and
62 minor uniquely identifying class inside qdisc.
63
64 Macros to manipulate handles:
65 */
66
67#define TC_H_MAJ_MASK (0xFFFF0000U)
68#define TC_H_MIN_MASK (0x0000FFFFU)
69#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
70#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
71#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
72
73#define TC_H_UNSPEC (0U)
74#define TC_H_ROOT (0xFFFFFFFFU)
75#define TC_H_INGRESS (0xFFFFFFF1U)
76#define TC_H_CLSACT TC_H_INGRESS
77
78#define TC_H_MIN_PRIORITY 0xFFE0U
79#define TC_H_MIN_INGRESS 0xFFF2U
80#define TC_H_MIN_EGRESS 0xFFF3U
81
82/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
83enum tc_link_layer {
84 TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
85 TC_LINKLAYER_ETHERNET,
86 TC_LINKLAYER_ATM,
87};
88#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
89
90struct tc_ratespec {
91 unsigned char cell_log;
92 __u8 linklayer; /* lower 4 bits */
93 unsigned short overhead;
94 short cell_align;
95 unsigned short mpu;
96 __u32 rate;
97};
98
99#define TC_RTAB_SIZE 1024
100
101struct tc_sizespec {
102 unsigned char cell_log;
103 unsigned char size_log;
104 short cell_align;
105 int overhead;
106 unsigned int linklayer;
107 unsigned int mpu;
108 unsigned int mtu;
109 unsigned int tsize;
110};
111
112enum {
113 TCA_STAB_UNSPEC,
114 TCA_STAB_BASE,
115 TCA_STAB_DATA,
116 __TCA_STAB_MAX
117};
118
119#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
120
121/* FIFO section */
122
123struct tc_fifo_qopt {
124 __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
125};
126
127/* SKBPRIO section */
128
129/*
130 * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
131 * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able
132 * to map one to one the DS field of IPV4 and IPV6 headers.
133 * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
134 */
135
136#define SKBPRIO_MAX_PRIORITY 64
137
138struct tc_skbprio_qopt {
139 __u32 limit; /* Queue length in packets. */
140};
141
142/* PRIO section */
143
144#define TCQ_PRIO_BANDS 16
145#define TCQ_MIN_PRIO_BANDS 2
146
147struct tc_prio_qopt {
148 int bands; /* Number of bands */
149 __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
150};
151
152/* MULTIQ section */
153
154struct tc_multiq_qopt {
155 __u16 bands; /* Number of bands */
156 __u16 max_bands; /* Maximum number of queues */
157};
158
159/* PLUG section */
160
161#define TCQ_PLUG_BUFFER 0
162#define TCQ_PLUG_RELEASE_ONE 1
163#define TCQ_PLUG_RELEASE_INDEFINITE 2
164#define TCQ_PLUG_LIMIT 3
165
166struct tc_plug_qopt {
167 /* TCQ_PLUG_BUFFER: Inset a plug into the queue and
168 * buffer any incoming packets
169 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
170 * to beginning of the next plug.
171 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
172 * Stop buffering packets until the next TCQ_PLUG_BUFFER
173 * command is received (just act as a pass-thru queue).
174 * TCQ_PLUG_LIMIT: Increase/decrease queue size
175 */
176 int action;
177 __u32 limit;
178};
179
180/* TBF section */
181
182struct tc_tbf_qopt {
183 struct tc_ratespec rate;
184 struct tc_ratespec peakrate;
185 __u32 limit;
186 __u32 buffer;
187 __u32 mtu;
188};
189
190enum {
191 TCA_TBF_UNSPEC,
192 TCA_TBF_PARMS,
193 TCA_TBF_RTAB,
194 TCA_TBF_PTAB,
195 TCA_TBF_RATE64,
196 TCA_TBF_PRATE64,
197 TCA_TBF_BURST,
198 TCA_TBF_PBURST,
199 TCA_TBF_PAD,
200 __TCA_TBF_MAX,
201};
202
203#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
204
205
206/* TEQL section */
207
208/* TEQL does not require any parameters */
209
210/* SFQ section */
211
212struct tc_sfq_qopt {
213 unsigned quantum; /* Bytes per round allocated to flow */
214 int perturb_period; /* Period of hash perturbation */
215 __u32 limit; /* Maximal packets in queue */
216 unsigned divisor; /* Hash divisor */
217 unsigned flows; /* Maximal number of flows */
218};
219
220struct tc_sfqred_stats {
221 __u32 prob_drop; /* Early drops, below max threshold */
222 __u32 forced_drop; /* Early drops, after max threshold */
223 __u32 prob_mark; /* Marked packets, below max threshold */
224 __u32 forced_mark; /* Marked packets, after max threshold */
225 __u32 prob_mark_head; /* Marked packets, below max threshold */
226 __u32 forced_mark_head;/* Marked packets, after max threshold */
227};
228
229struct tc_sfq_qopt_v1 {
230 struct tc_sfq_qopt v0;
231 unsigned int depth; /* max number of packets per flow */
232 unsigned int headdrop;
233/* SFQRED parameters */
234 __u32 limit; /* HARD maximal flow queue length (bytes) */
235 __u32 qth_min; /* Min average length threshold (bytes) */
236 __u32 qth_max; /* Max average length threshold (bytes) */
237 unsigned char Wlog; /* log(W) */
238 unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
239 unsigned char Scell_log; /* cell size for idle damping */
240 unsigned char flags;
241 __u32 max_P; /* probability, high resolution */
242/* SFQRED stats */
243 struct tc_sfqred_stats stats;
244};
245
246
247struct tc_sfq_xstats {
248 __s32 allot;
249};
250
251/* RED section */
252
253enum {
254 TCA_RED_UNSPEC,
255 TCA_RED_PARMS,
256 TCA_RED_STAB,
257 TCA_RED_MAX_P,
258 __TCA_RED_MAX,
259};
260
261#define TCA_RED_MAX (__TCA_RED_MAX - 1)
262
263struct tc_red_qopt {
264 __u32 limit; /* HARD maximal queue length (bytes) */
265 __u32 qth_min; /* Min average length threshold (bytes) */
266 __u32 qth_max; /* Max average length threshold (bytes) */
267 unsigned char Wlog; /* log(W) */
268 unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
269 unsigned char Scell_log; /* cell size for idle damping */
270 unsigned char flags;
271#define TC_RED_ECN 1
272#define TC_RED_HARDDROP 2
273#define TC_RED_ADAPTATIVE 4
274};
275
276struct tc_red_xstats {
277 __u32 early; /* Early drops */
278 __u32 pdrop; /* Drops due to queue limits */
279 __u32 other; /* Drops due to drop() calls */
280 __u32 marked; /* Marked packets */
281};
282
283/* GRED section */
/*
 * GRED (Generic RED) qdisc UAPI: multiple RED "virtual queues" (VQs/DPs)
 * multiplexed in one qdisc.  ABI — append-only before the sentinels.
 */
284
285#define MAX_DPs 16
286
287enum {
288 TCA_GRED_UNSPEC,
289 TCA_GRED_PARMS,
290 TCA_GRED_STAB,
291 TCA_GRED_DPS,
292 TCA_GRED_MAX_P,
293 TCA_GRED_LIMIT,
294 TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
295 __TCA_GRED_MAX,
296};
297
298#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
299
/* Entry wrapper inside TCA_GRED_VQ_LIST. */
300enum {
301 TCA_GRED_VQ_ENTRY_UNSPEC,
302 TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
303 __TCA_GRED_VQ_ENTRY_MAX,
304};
305#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
306
/* Per-virtual-queue attributes nested inside TCA_GRED_VQ_ENTRY. */
307enum {
308 TCA_GRED_VQ_UNSPEC,
309 TCA_GRED_VQ_PAD,
310 TCA_GRED_VQ_DP, /* u32 */
311 TCA_GRED_VQ_STAT_BYTES, /* u64 */
312 TCA_GRED_VQ_STAT_PACKETS, /* u32 */
313 TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
314 TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
315 TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
316 TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
317 TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
318 TCA_GRED_VQ_STAT_PDROP, /* u32 */
319 TCA_GRED_VQ_STAT_OTHER, /* u32 */
320 TCA_GRED_VQ_FLAGS, /* u32 */
321 __TCA_GRED_VQ_MAX
322};
323
324#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
325
/* Per-DP configuration and counters (TCA_GRED_PARMS carries an array of these). */
326struct tc_gred_qopt {
327 __u32 limit; /* HARD maximal queue length (bytes) */
328 __u32 qth_min; /* Min average length threshold (bytes) */
329 __u32 qth_max; /* Max average length threshold (bytes) */
330 __u32 DP; /* up to 2^32 DPs */
331 __u32 backlog; /* stats below — NOTE(review): units per sch_gred.c */
332 __u32 qave;
333 __u32 forced;
334 __u32 early;
335 __u32 other;
336 __u32 pdrop;
337 __u8 Wlog; /* log(W) */
338 __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
339 __u8 Scell_log; /* cell size for idle damping */
340 __u8 prio; /* prio of this VQ */
341 __u32 packets;
342 __u32 bytesin;
343};
344
345/* gred setup */
346struct tc_gred_sopt {
347 __u32 DPs; /* number of DPs in use (<= MAX_DPs) */
348 __u32 def_DP; /* default DP for unclassified traffic */
349 __u8 grio;
350 __u8 flags;
351 __u16 pad1; /* explicit padding, keep zero */
352};
353
354/* CHOKe section */
/*
 * CHOKe qdisc UAPI (CHOose and Keep/Kill — RED variant that drops a matching
 * queued packet together with the arriving one).  ABI — append-only.
 */
355
356enum {
357 TCA_CHOKE_UNSPEC,
358 TCA_CHOKE_PARMS, /* struct tc_choke_qopt */
359 TCA_CHOKE_STAB,
360 TCA_CHOKE_MAX_P,
361 __TCA_CHOKE_MAX,
362};
363
364#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
365
/* Note: unlike RED, the thresholds here are in packets, not bytes. */
366struct tc_choke_qopt {
367 __u32 limit; /* Hard queue length (packets) */
368 __u32 qth_min; /* Min average threshold (packets) */
369 __u32 qth_max; /* Max average threshold (packets) */
370 unsigned char Wlog; /* log(W) */
371 unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
372 unsigned char Scell_log; /* cell size for idle damping */
373 unsigned char flags; /* see RED flags */
374};
375
/* Statistics dumped via TCA_XSTATS. */
376struct tc_choke_xstats {
377 __u32 early; /* Early drops */
378 __u32 pdrop; /* Drops due to queue limits */
379 __u32 other; /* Drops due to drop() calls */
380 __u32 marked; /* Marked packets */
381 __u32 matched; /* Drops due to flow match */
382};
383
384/* HTB section */
/*
 * HTB (Hierarchical Token Bucket) qdisc UAPI.  ABI — append-only before
 * __TCA_HTB_MAX; struct layouts are fixed.
 */
385#define TC_HTB_NUMPRIO 8
386#define TC_HTB_MAXDEPTH 8
387#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */
388
/* Per-class parameters (TCA_HTB_PARMS). */
389struct tc_htb_opt {
390 struct tc_ratespec rate; /* guaranteed rate */
391 struct tc_ratespec ceil; /* borrowing ceiling */
392 __u32 buffer;
393 __u32 cbuffer;
394 __u32 quantum;
395 __u32 level; /* out only */
396 __u32 prio;
397};
/* Qdisc-global parameters (TCA_HTB_INIT). */
398struct tc_htb_glob {
399 __u32 version; /* to match HTB/TC */
400 __u32 rate2quantum; /* bps->quantum divisor */
401 __u32 defcls; /* default class number */
402 __u32 debug; /* debug flags */
403
404 /* stats */
405 __u32 direct_pkts; /* count of non shaped packets */
406};
407enum {
408 TCA_HTB_UNSPEC,
409 TCA_HTB_PARMS,
410 TCA_HTB_INIT,
411 TCA_HTB_CTAB,
412 TCA_HTB_RTAB,
413 TCA_HTB_DIRECT_QLEN,
414 TCA_HTB_RATE64,
415 TCA_HTB_CEIL64,
416 TCA_HTB_PAD,
417 __TCA_HTB_MAX,
418};
419
420#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
421
/* Per-class statistics dumped via TCA_XSTATS. */
422struct tc_htb_xstats {
423 __u32 lends;
424 __u32 borrows;
425 __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
426 __s32 tokens;
427 __s32 ctokens;
428};
429
430/* HFSC section */
/*
 * HFSC (Hierarchical Fair Service Curve) qdisc UAPI.  Each class carries up
 * to three service curves: real-time (RSC), link-share (FSC), upper-limit
 * (USC).  ABI — append-only.
 */
431
432struct tc_hfsc_qopt {
433 __u16 defcls; /* default class */
434};
435
/* Two-piece linear service curve: slope m1 for d microseconds, then m2. */
436struct tc_service_curve {
437 __u32 m1; /* slope of the first segment in bps */
438 __u32 d; /* x-projection of the first segment in us */
439 __u32 m2; /* slope of the second segment in bps */
440};
441
442struct tc_hfsc_stats {
443 __u64 work; /* total work done */
444 __u64 rtwork; /* work done by real-time criteria */
445 __u32 period; /* current period */
446 __u32 level; /* class level in hierarchy */
447};
448
449enum {
450 TCA_HFSC_UNSPEC,
451 TCA_HFSC_RSC, /* struct tc_service_curve, real-time */
452 TCA_HFSC_FSC, /* struct tc_service_curve, link-share */
453 TCA_HFSC_USC, /* struct tc_service_curve, upper-limit */
454 __TCA_HFSC_MAX,
455};
456
457#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
458
459
460/* CBQ section */
/*
 * CBQ (Class-Based Queueing) qdisc UAPI.  Legacy scheduler; all of this is
 * frozen ABI — do not change values, order, or layout.
 */
461
462#define TC_CBQ_MAXPRIO 8
463#define TC_CBQ_MAXLEVEL 8
464#define TC_CBQ_DEF_EWMA 5
465
/* Link-sharing/scheduling parameters (TCA_CBQ_LSSOPT). */
466struct tc_cbq_lssopt {
467 unsigned char change; /* bitmask (TCF_CBQ_LSS_* below) of fields being changed */
468 unsigned char flags;
469#define TCF_CBQ_LSS_BOUNDED 1
470#define TCF_CBQ_LSS_ISOLATED 2
471 unsigned char ewma_log;
472 unsigned char level;
/* Bits for 'change' above: */
473#define TCF_CBQ_LSS_FLAGS 1
474#define TCF_CBQ_LSS_EWMA 2
475#define TCF_CBQ_LSS_MAXIDLE 4
476#define TCF_CBQ_LSS_MINIDLE 8
477#define TCF_CBQ_LSS_OFFTIME 0x10
478#define TCF_CBQ_LSS_AVPKT 0x20
479 __u32 maxidle;
480 __u32 minidle;
481 __u32 offtime;
482 __u32 avpkt;
483};
484
/* Weighted round-robin parameters (TCA_CBQ_WRROPT). */
485struct tc_cbq_wrropt {
486 unsigned char flags;
487 unsigned char priority;
488 unsigned char cpriority;
489 unsigned char __reserved; /* padding, keep zero */
490 __u32 allot;
491 __u32 weight;
492};
493
/* Overlimit-action strategy (TCA_CBQ_OVL_STRATEGY). */
494struct tc_cbq_ovl {
495 unsigned char strategy;
496#define TC_CBQ_OVL_CLASSIC 0
497#define TC_CBQ_OVL_DELAY 1
498#define TC_CBQ_OVL_LOWPRIO 2
499#define TC_CBQ_OVL_DROP 3
500#define TC_CBQ_OVL_RCLASSIC 4
501 unsigned char priority2;
502 __u16 pad; /* padding, keep zero */
503 __u32 penalty;
504};
505
/* Policing mode (TCA_CBQ_POLICE). */
506struct tc_cbq_police {
507 unsigned char police;
508 unsigned char __res1; /* reserved */
509 unsigned short __res2; /* reserved */
510};
511
/* Defmap-based classification fallback (TCA_CBQ_FOPT). */
512struct tc_cbq_fopt {
513 __u32 split;
514 __u32 defmap;
515 __u32 defchange; /* bitmask of defmap bits being changed */
516};
517
/* Statistics dumped via TCA_XSTATS. */
518struct tc_cbq_xstats {
519 __u32 borrows;
520 __u32 overactions;
521 __s32 avgidle;
522 __s32 undertime;
523};
524
525enum {
526 TCA_CBQ_UNSPEC,
527 TCA_CBQ_LSSOPT,
528 TCA_CBQ_WRROPT,
529 TCA_CBQ_FOPT,
530 TCA_CBQ_OVL_STRATEGY,
531 TCA_CBQ_RATE,
532 TCA_CBQ_RTAB,
533 TCA_CBQ_POLICE,
534 __TCA_CBQ_MAX,
535};
536
537#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
538
539/* dsmark section */
/* dsmark qdisc UAPI: netlink attributes for DS-field (DSCP) marking.  ABI — append-only. */
540
541enum {
542 TCA_DSMARK_UNSPEC,
543 TCA_DSMARK_INDICES,
544 TCA_DSMARK_DEFAULT_INDEX,
545 TCA_DSMARK_SET_TC_INDEX,
546 TCA_DSMARK_MASK,
547 TCA_DSMARK_VALUE,
548 __TCA_DSMARK_MAX,
549};
550
551#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
552
553/* ATM section */
/* ATM qdisc UAPI: attributes mapping classes onto ATM VCs.  ABI — append-only. */
554
555enum {
556 TCA_ATM_UNSPEC,
557 TCA_ATM_FD, /* file/socket descriptor */
558 TCA_ATM_PTR, /* pointer to descriptor - later */
559 TCA_ATM_HDR, /* LL header */
560 TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
561 TCA_ATM_ADDR, /* PVC address (for output only) */
562 TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
563 __TCA_ATM_MAX,
564};
565
566#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
567
568/* Network emulator */
/*
 * netem qdisc UAPI: emulates delay, jitter, loss, duplication, corruption,
 * reordering and rate on egress.  ABI — append-only before __TCA_NETEM_MAX.
 */
569
570enum {
571 TCA_NETEM_UNSPEC,
572 TCA_NETEM_CORR, /* struct tc_netem_corr */
573 TCA_NETEM_DELAY_DIST, /* custom delay distribution table */
574 TCA_NETEM_REORDER, /* struct tc_netem_reorder */
575 TCA_NETEM_CORRUPT, /* struct tc_netem_corrupt */
576 TCA_NETEM_LOSS, /* nested NETEM_LOSS_* model */
577 TCA_NETEM_RATE, /* struct tc_netem_rate */
578 TCA_NETEM_ECN,
579 TCA_NETEM_RATE64,
580 TCA_NETEM_PAD,
581 TCA_NETEM_LATENCY64,
582 TCA_NETEM_JITTER64,
583 TCA_NETEM_SLOT, /* struct tc_netem_slot */
584 TCA_NETEM_SLOT_DIST,
585 __TCA_NETEM_MAX,
586};
587
588#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
589
/* Base configuration (the qdisc's opt blob). */
590struct tc_netem_qopt {
591 __u32 latency; /* added delay (us) */
592 __u32 limit; /* fifo limit (packets) */
593 __u32 loss; /* random packet loss (0=none ~0=100%) */
594 __u32 gap; /* re-ordering gap (0 for none) */
595 __u32 duplicate; /* random packet dup (0=none ~0=100%) */
596 __u32 jitter; /* random jitter in latency (us) */
597};
598
/* Correlation coefficients for successive random decisions. */
599struct tc_netem_corr {
600 __u32 delay_corr; /* delay correlation */
601 __u32 loss_corr; /* packet loss correlation */
602 __u32 dup_corr; /* duplicate correlation */
603};
604
605struct tc_netem_reorder {
606 __u32 probability;
607 __u32 correlation;
608};
609
610struct tc_netem_corrupt {
611 __u32 probability;
612 __u32 correlation;
613};
614
615struct tc_netem_rate {
616 __u32 rate; /* byte/s */
617 __s32 packet_overhead;
618 __u32 cell_size;
619 __s32 cell_overhead;
620};
621
/* Slotted-delivery emulation (e.g. Wi-Fi/DOCSIS style bursts). */
622struct tc_netem_slot {
623 __s64 min_delay; /* nsec */
624 __s64 max_delay;
625 __s32 max_packets;
626 __s32 max_bytes;
627 __s64 dist_delay; /* nsec */
628 __s64 dist_jitter; /* nsec */
629};
630
631enum {
632 NETEM_LOSS_UNSPEC,
633 NETEM_LOSS_GI, /* General Intuitive - 4 state model */
634 NETEM_LOSS_GE, /* Gilbert Elliot models */
635 __NETEM_LOSS_MAX
636};
637#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
638
639/* State transition probabilities for 4 state model */
640struct tc_netem_gimodel {
641 __u32 p13;
642 __u32 p31;
643 __u32 p32;
644 __u32 p14;
645 __u32 p23;
646};
647
648/* Gilbert-Elliot models */
649struct tc_netem_gemodel {
650 __u32 p;
651 __u32 r;
652 __u32 h;
653 __u32 k1;
654};
655
656#define NETEM_DIST_SCALE 8192
657#define NETEM_DIST_MAX 16384
658
659/* DRR */
/* DRR (Deficit Round Robin) qdisc UAPI.  ABI — append-only. */
660
661enum {
662 TCA_DRR_UNSPEC,
663 TCA_DRR_QUANTUM, /* u32, per-class quantum in bytes */
664 __TCA_DRR_MAX
665};
666
667#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
668
/* Per-class statistics dumped via TCA_XSTATS. */
669struct tc_drr_stats {
670 __u32 deficit;
671};
672
673/* MQPRIO */
/*
 * mqprio qdisc UAPI: maps priorities to traffic classes and traffic classes
 * to hardware queue ranges, with optional hardware offload.  ABI.
 */
674#define TC_QOPT_BITMASK 15
675#define TC_QOPT_MAX_QUEUE 16
676
677enum {
678 TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */
679 TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
680 __TC_MQPRIO_HW_OFFLOAD_MAX
681};
682
683#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
684
685enum {
686 TC_MQPRIO_MODE_DCB,
687 TC_MQPRIO_MODE_CHANNEL,
688 __TC_MQPRIO_MODE_MAX
689};
690
/*
 * NOTE(review): this macro name is self-referential — it was presumably meant
 * to be TC_MQPRIO_MODE_MAX.  It still evaluates to 1 only because C macros do
 * not recurse, so the inner token resolves to the enum constant above.  Kept
 * byte-for-byte: it is exposed UAPI and userspace may reference this name.
 */
691#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
692
693enum {
694 TC_MQPRIO_SHAPER_DCB,
695 TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */
696 __TC_MQPRIO_SHAPER_MAX
697};
698
/* NOTE(review): same self-referential pattern as __TC_MQPRIO_MODE_MAX above. */
699#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
700
/* Base configuration blob. */
701struct tc_mqprio_qopt {
702 __u8 num_tc; /* number of traffic classes */
703 __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; /* skb priority -> TC */
704 __u8 hw; /* TC_MQPRIO_HW_OFFLOAD_* */
705 __u16 count[TC_QOPT_MAX_QUEUE]; /* queues per TC */
706 __u16 offset[TC_QOPT_MAX_QUEUE]; /* first queue of each TC */
707};
708
/* Bits indicating which optional TCA_MQPRIO_* attributes are present. */
709#define TC_MQPRIO_F_MODE 0x1
710#define TC_MQPRIO_F_SHAPER 0x2
711#define TC_MQPRIO_F_MIN_RATE 0x4
712#define TC_MQPRIO_F_MAX_RATE 0x8
713
714enum {
715 TCA_MQPRIO_UNSPEC,
716 TCA_MQPRIO_MODE,
717 TCA_MQPRIO_SHAPER,
718 TCA_MQPRIO_MIN_RATE64,
719 TCA_MQPRIO_MAX_RATE64,
720 __TCA_MQPRIO_MAX,
721};
722
723#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
724
725/* SFB */
/* SFB (Stochastic Fair Blue) qdisc UAPI.  ABI — append-only. */
726
727enum {
728 TCA_SFB_UNSPEC,
729 TCA_SFB_PARMS, /* struct tc_sfb_qopt */
730 __TCA_SFB_MAX,
731};
732
733#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
734
735/*
736 * Note: increment, decrement are Q0.16 fixed-point values.
737 */
738struct tc_sfb_qopt {
739 __u32 rehash_interval; /* delay between hash move, in ms */
740 __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */
741 __u32 max; /* max len of qlen_min */
742 __u32 bin_size; /* maximum queue length per bin */
743 __u32 increment; /* probability increment, (d1 in Blue) */
744 __u32 decrement; /* probability decrement, (d2 in Blue) */
745 __u32 limit; /* max SFB queue length */
746 __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
747 __u32 penalty_burst;
748};
749
/* Statistics dumped via TCA_XSTATS. */
750struct tc_sfb_xstats {
751 __u32 earlydrop;
752 __u32 penaltydrop;
753 __u32 bucketdrop;
754 __u32 queuedrop;
755 __u32 childdrop; /* drops in child qdisc */
756 __u32 marked;
757 __u32 maxqlen;
758 __u32 maxprob; /* probabilities are scaled to SFB_MAX_PROB below */
759 __u32 avgprob;
760};
761
762#define SFB_MAX_PROB 0xFFFF
763
764/* QFQ */
/* QFQ (Quick Fair Queueing) qdisc UAPI.  ABI — append-only. */
765enum {
766 TCA_QFQ_UNSPEC,
767 TCA_QFQ_WEIGHT, /* u32, class weight */
768 TCA_QFQ_LMAX, /* u32, max packet size for the class */
769 __TCA_QFQ_MAX
770};
771
772#define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1)
773
/* Per-class statistics dumped via TCA_XSTATS. */
774struct tc_qfq_stats {
775 __u32 weight;
776 __u32 lmax;
777};
778
779/* CODEL */
/* CoDel (Controlled Delay) AQM qdisc UAPI.  ABI — append-only. */
780
781enum {
782 TCA_CODEL_UNSPEC,
783 TCA_CODEL_TARGET,
784 TCA_CODEL_LIMIT,
785 TCA_CODEL_INTERVAL,
786 TCA_CODEL_ECN,
787 TCA_CODEL_CE_THRESHOLD,
788 __TCA_CODEL_MAX
789};
790
791#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
792
/* Statistics dumped via TCA_XSTATS. */
793struct tc_codel_xstats {
794 __u32 maxpacket; /* largest packet we've seen so far */
795 __u32 count; /* how many drops we've done since the last time we
796 * entered dropping state
797 */
798 __u32 lastcount; /* count at entry to dropping state */
799 __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
800 __s32 drop_next; /* time to drop next packet */
801 __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */
802 __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
803 __u32 dropping; /* are we in dropping state ? */
804 __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
805};
806
807/* FQ_CODEL */
/* fq_codel qdisc UAPI: fair queueing combined with per-flow CoDel.  ABI. */
808
809enum {
810 TCA_FQ_CODEL_UNSPEC,
811 TCA_FQ_CODEL_TARGET,
812 TCA_FQ_CODEL_LIMIT,
813 TCA_FQ_CODEL_INTERVAL,
814 TCA_FQ_CODEL_ECN,
815 TCA_FQ_CODEL_FLOWS,
816 TCA_FQ_CODEL_QUANTUM,
817 TCA_FQ_CODEL_CE_THRESHOLD,
818 TCA_FQ_CODEL_DROP_BATCH_SIZE,
819 TCA_FQ_CODEL_MEMORY_LIMIT,
820 __TCA_FQ_CODEL_MAX
821};
822
823#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1)
824
/* Discriminator for tc_fq_codel_xstats.type below. */
825enum {
826 TCA_FQ_CODEL_XSTATS_QDISC,
827 TCA_FQ_CODEL_XSTATS_CLASS,
828};
829
830struct tc_fq_codel_qd_stats {
831 __u32 maxpacket; /* largest packet we've seen so far */
832 __u32 drop_overlimit; /* number of time max qdisc
833 * packet limit was hit
834 */
835 __u32 ecn_mark; /* number of packets we ECN marked
836 * instead of being dropped
837 */
838 __u32 new_flow_count; /* number of time packets
839 * created a 'new flow'
840 */
841 __u32 new_flows_len; /* count of flows in new list */
842 __u32 old_flows_len; /* count of flows in old list */
843 __u32 ce_mark; /* packets above ce_threshold */
844 __u32 memory_usage; /* in bytes */
845 __u32 drop_overmemory;
846};
847
848struct tc_fq_codel_cl_stats {
849 __s32 deficit;
850 __u32 ldelay; /* in-queue delay seen by most recently
851 * dequeued packet
852 */
853 __u32 count;
854 __u32 lastcount;
855 __u32 dropping;
856 __s32 drop_next;
857};
858
/* Tagged union: 'type' selects which member is valid. */
859struct tc_fq_codel_xstats {
860 __u32 type; /* TCA_FQ_CODEL_XSTATS_{QDISC,CLASS} */
861 union {
862 struct tc_fq_codel_qd_stats qdisc_stats;
863 struct tc_fq_codel_cl_stats class_stats;
864 };
865};
866
867/* FQ */
/* fq qdisc UAPI: flow queueing with pacing for locally generated traffic.  ABI. */
868
869enum {
870 TCA_FQ_UNSPEC,
871
872 TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
873
874 TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
875
876 TCA_FQ_QUANTUM, /* RR quantum */
877
878 TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
879
880 TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
881
882 TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
883
884 TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
885
886 TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
887
888 TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
889
890 TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
891
892 TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
893
894 TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
895
896 __TCA_FQ_MAX
897};
898
899#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
900
/* Qdisc statistics dumped via TCA_XSTATS. */
901struct tc_fq_qd_stats {
902 __u64 gc_flows;
903 __u64 highprio_packets;
904 __u64 tcp_retrans;
905 __u64 throttled;
906 __u64 flows_plimit;
907 __u64 pkts_too_long;
908 __u64 allocation_errors;
909 __s64 time_next_delayed_flow;
910 __u32 flows;
911 __u32 inactive_flows;
912 __u32 throttled_flows;
913 __u32 unthrottle_latency_ns;
914 __u64 ce_mark; /* packets above ce_threshold */
915};
916
917/* Heavy-Hitter Filter */
/* hhf qdisc UAPI: isolates "heavy-hitter" flows from the rest.  ABI — append-only. */
918
919enum {
920 TCA_HHF_UNSPEC,
921 TCA_HHF_BACKLOG_LIMIT,
922 TCA_HHF_QUANTUM,
923 TCA_HHF_HH_FLOWS_LIMIT,
924 TCA_HHF_RESET_TIMEOUT,
925 TCA_HHF_ADMIT_BYTES,
926 TCA_HHF_EVICT_TIMEOUT,
927 TCA_HHF_NON_HH_WEIGHT,
928 __TCA_HHF_MAX
929};
930
931#define TCA_HHF_MAX (__TCA_HHF_MAX - 1)
932
/* Statistics dumped via TCA_XSTATS. */
933struct tc_hhf_xstats {
934 __u32 drop_overlimit; /* number of times max qdisc packet limit
935 * was hit
936 */
937 __u32 hh_overlimit; /* number of times max heavy-hitters was hit */
938 __u32 hh_tot_count; /* number of captured heavy-hitters so far */
939 __u32 hh_cur_count; /* number of current heavy-hitters */
940};
941
942/* PIE */
/* PIE (Proportional Integral controller Enhanced) AQM qdisc UAPI.  ABI. */
943enum {
944 TCA_PIE_UNSPEC,
945 TCA_PIE_TARGET,
946 TCA_PIE_LIMIT,
947 TCA_PIE_TUPDATE,
948 TCA_PIE_ALPHA,
949 TCA_PIE_BETA,
950 TCA_PIE_ECN,
951 TCA_PIE_BYTEMODE,
952 __TCA_PIE_MAX
953};
954#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
955
/* Statistics dumped via TCA_XSTATS. */
956struct tc_pie_xstats {
957 __u32 prob; /* current probability */
958 __u32 delay; /* current delay in ms */
959 __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
960 __u32 packets_in; /* total number of packets enqueued */
961 __u32 dropped; /* packets dropped due to pie_action */
962 __u32 overlimit; /* dropped due to lack of space in queue */
963 __u32 maxq; /* maximum queue size */
964 __u32 ecn_mark; /* packets marked with ecn*/
965};
966
967/* CBS */
/* CBS (Credit-Based Shaper, IEEE 802.1Qav) qdisc UAPI.  ABI. */
968struct tc_cbs_qopt {
969 __u8 offload; /* non-zero: ask driver for hardware offload */
970 __u8 _pad[3]; /* explicit padding, keep zero */
971 __s32 hicredit;
972 __s32 locredit;
973 __s32 idleslope;
974 __s32 sendslope;
975};
976
977enum {
978 TCA_CBS_UNSPEC,
979 TCA_CBS_PARMS, /* struct tc_cbs_qopt */
980 __TCA_CBS_MAX,
981};
982
983#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
984
985
986/* ETF */
/* ETF (Earliest TxTime First) qdisc UAPI.  ABI. */
987struct tc_etf_qopt {
988 __s32 delta;
989 __s32 clockid;
990 __u32 flags;
/*
 * NOTE(review): BIT() is a kernel-internal macro not provided by UAPI
 * headers, so userspace including this header cannot use these two defines
 * as-is; upstream later switched such UAPI flags to _BITUL() — confirm
 * against current mainline before relying on them from userspace.
 */
991#define TC_ETF_DEADLINE_MODE_ON BIT(0)
992#define TC_ETF_OFFLOAD_ON BIT(1)
993};
994
995enum {
996 TCA_ETF_UNSPEC,
997 TCA_ETF_PARMS, /* struct tc_etf_qopt */
998 __TCA_ETF_MAX,
999};
1000
1001#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
1002
1003
1004/* CAKE */
/*
 * CAKE (Common Applications Kept Enhanced) qdisc UAPI: config attributes,
 * stats attributes, per-tin stats, and mode enums.  ABI — append-only before
 * each __*_MAX sentinel.
 */
1005enum {
1006 TCA_CAKE_UNSPEC,
1007 TCA_CAKE_PAD,
1008 TCA_CAKE_BASE_RATE64,
1009 TCA_CAKE_DIFFSERV_MODE, /* CAKE_DIFFSERV_* */
1010 TCA_CAKE_ATM, /* CAKE_ATM_* */
1011 TCA_CAKE_FLOW_MODE, /* CAKE_FLOW_* */
1012 TCA_CAKE_OVERHEAD,
1013 TCA_CAKE_RTT,
1014 TCA_CAKE_TARGET,
1015 TCA_CAKE_AUTORATE,
1016 TCA_CAKE_MEMORY,
1017 TCA_CAKE_NAT,
1018 TCA_CAKE_RAW,
1019 TCA_CAKE_WASH,
1020 TCA_CAKE_MPU,
1021 TCA_CAKE_INGRESS,
1022 TCA_CAKE_ACK_FILTER, /* CAKE_ACK_* */
1023 TCA_CAKE_SPLIT_GSO,
1024 __TCA_CAKE_MAX
1025};
1026#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
1027
/* Qdisc-level statistics attributes (note: 0 is INVALID here, not UNSPEC). */
1028enum {
1029 __TCA_CAKE_STATS_INVALID,
1030 TCA_CAKE_STATS_PAD,
1031 TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
1032 TCA_CAKE_STATS_MEMORY_LIMIT,
1033 TCA_CAKE_STATS_MEMORY_USED,
1034 TCA_CAKE_STATS_AVG_NETOFF,
1035 TCA_CAKE_STATS_MIN_NETLEN,
1036 TCA_CAKE_STATS_MAX_NETLEN,
1037 TCA_CAKE_STATS_MIN_ADJLEN,
1038 TCA_CAKE_STATS_MAX_ADJLEN,
1039 TCA_CAKE_STATS_TIN_STATS, /* nested TCA_CAKE_TIN_STATS_* per tin */
1040 TCA_CAKE_STATS_DEFICIT,
1041 TCA_CAKE_STATS_COBALT_COUNT,
1042 TCA_CAKE_STATS_DROPPING,
1043 TCA_CAKE_STATS_DROP_NEXT_US,
1044 TCA_CAKE_STATS_P_DROP,
1045 TCA_CAKE_STATS_BLUE_TIMER_US,
1046 __TCA_CAKE_STATS_MAX
1047};
1048#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
1049
/* Per-tin (per diffserv tier) statistics attributes. */
1050enum {
1051 __TCA_CAKE_TIN_STATS_INVALID,
1052 TCA_CAKE_TIN_STATS_PAD,
1053 TCA_CAKE_TIN_STATS_SENT_PACKETS,
1054 TCA_CAKE_TIN_STATS_SENT_BYTES64,
1055 TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
1056 TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
1057 TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
1058 TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
1059 TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
1060 TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
1061 TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
1062 TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
1063 TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
1064 TCA_CAKE_TIN_STATS_TARGET_US,
1065 TCA_CAKE_TIN_STATS_INTERVAL_US,
1066 TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
1067 TCA_CAKE_TIN_STATS_WAY_MISSES,
1068 TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
1069 TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
1070 TCA_CAKE_TIN_STATS_AVG_DELAY_US,
1071 TCA_CAKE_TIN_STATS_BASE_DELAY_US,
1072 TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
1073 TCA_CAKE_TIN_STATS_BULK_FLOWS,
1074 TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
1075 TCA_CAKE_TIN_STATS_MAX_SKBLEN,
1076 TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
1077 __TCA_CAKE_TIN_STATS_MAX
1078};
1079#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
1080#define TC_CAKE_MAX_TINS (8)
1081
/* Flow-isolation modes (values double as bit combinations, per the comments). */
1082enum {
1083 CAKE_FLOW_NONE = 0,
1084 CAKE_FLOW_SRC_IP,
1085 CAKE_FLOW_DST_IP,
1086 CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
1087 CAKE_FLOW_FLOWS,
1088 CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
1089 CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
1090 CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
1091 CAKE_FLOW_MAX,
1092};
1093
/* Diffserv tiering schemes (TCA_CAKE_DIFFSERV_MODE). */
1094enum {
1095 CAKE_DIFFSERV_DIFFSERV3 = 0,
1096 CAKE_DIFFSERV_DIFFSERV4,
1097 CAKE_DIFFSERV_DIFFSERV8,
1098 CAKE_DIFFSERV_BESTEFFORT,
1099 CAKE_DIFFSERV_PRECEDENCE,
1100 CAKE_DIFFSERV_MAX
1101};
1102
/* ACK-filter modes (TCA_CAKE_ACK_FILTER). */
1103enum {
1104 CAKE_ACK_NONE = 0,
1105 CAKE_ACK_FILTER,
1106 CAKE_ACK_AGGRESSIVE,
1107 CAKE_ACK_MAX
1108};
1109
/* Link-layer framing compensation modes (TCA_CAKE_ATM). */
1110enum {
1111 CAKE_ATM_NONE = 0,
1112 CAKE_ATM_ATM,
1113 CAKE_ATM_PTM,
1114 CAKE_ATM_MAX
1115};
1116
1117
1118/* TAPRIO */
/*
 * taprio qdisc UAPI (time-aware priority scheduling, IEEE 802.1Qbv-style
 * gate schedules).  ABI — append-only before the sentinels.
 */
/* Gate-operation command carried in TCA_TAPRIO_SCHED_ENTRY_CMD. */
1119enum {
1120 TC_TAPRIO_CMD_SET_GATES = 0x00,
1121 TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
1122 TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
1123};
1124
/* Attributes of a single schedule entry. */
1125enum {
1126 TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
1127 TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
1128 TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
1129 TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
1130 TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
1131 __TCA_TAPRIO_SCHED_ENTRY_MAX,
1132};
1133#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
1134
1135/* The format for schedule entry list is:
1136 * [TCA_TAPRIO_SCHED_ENTRY_LIST]
1137 * [TCA_TAPRIO_SCHED_ENTRY]
1138 * [TCA_TAPRIO_SCHED_ENTRY_CMD]
1139 * [TCA_TAPRIO_SCHED_ENTRY_GATES]
1140 * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
1141 */
1142enum {
1143 TCA_TAPRIO_SCHED_UNSPEC,
1144 TCA_TAPRIO_SCHED_ENTRY, /* nested TCA_TAPRIO_SCHED_ENTRY_* */
1145 __TCA_TAPRIO_SCHED_MAX,
1146};
1147
1148#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
1149
/* Top-level taprio attributes. */
1150enum {
1151 TCA_TAPRIO_ATTR_UNSPEC,
1152 TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
1153 TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
1154 TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
1155 TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
1156 TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
1157 TCA_TAPRIO_PAD,
1158 __TCA_TAPRIO_ATTR_MAX,
1159};
1160
1161#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
1162
1163#endif
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index b17201edfa09..b4875a93363a 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -220,4 +220,12 @@ struct prctl_mm_map {
220# define PR_SPEC_DISABLE (1UL << 2) 220# define PR_SPEC_DISABLE (1UL << 2)
221# define PR_SPEC_FORCE_DISABLE (1UL << 3) 221# define PR_SPEC_FORCE_DISABLE (1UL << 3)
222 222
223/* Reset arm64 pointer authentication keys */
224#define PR_PAC_RESET_KEYS 54
225# define PR_PAC_APIAKEY (1UL << 0)
226# define PR_PAC_APIBKEY (1UL << 1)
227# define PR_PAC_APDAKEY (1UL << 2)
228# define PR_PAC_APDBKEY (1UL << 3)
229# define PR_PAC_APGAKEY (1UL << 4)
230
223#endif /* _LINUX_PRCTL_H */ 231#endif /* _LINUX_PRCTL_H */
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index 84c3de89696a..40d028eed645 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -11,94 +11,9 @@
11 * device configuration. 11 * device configuration.
12 */ 12 */
13 13
14#include <linux/vhost_types.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/compiler.h>
16#include <linux/ioctl.h> 16#include <linux/ioctl.h>
17#include <linux/virtio_config.h>
18#include <linux/virtio_ring.h>
19
20struct vhost_vring_state {
21 unsigned int index;
22 unsigned int num;
23};
24
25struct vhost_vring_file {
26 unsigned int index;
27 int fd; /* Pass -1 to unbind from file. */
28
29};
30
31struct vhost_vring_addr {
32 unsigned int index;
33 /* Option flags. */
34 unsigned int flags;
35 /* Flag values: */
36 /* Whether log address is valid. If set enables logging. */
37#define VHOST_VRING_F_LOG 0
38
39 /* Start of array of descriptors (virtually contiguous) */
40 __u64 desc_user_addr;
41 /* Used structure address. Must be 32 bit aligned */
42 __u64 used_user_addr;
43 /* Available structure address. Must be 16 bit aligned */
44 __u64 avail_user_addr;
45 /* Logging support. */
46 /* Log writes to used structure, at offset calculated from specified
47 * address. Address must be 32 bit aligned. */
48 __u64 log_guest_addr;
49};
50
51/* no alignment requirement */
52struct vhost_iotlb_msg {
53 __u64 iova;
54 __u64 size;
55 __u64 uaddr;
56#define VHOST_ACCESS_RO 0x1
57#define VHOST_ACCESS_WO 0x2
58#define VHOST_ACCESS_RW 0x3
59 __u8 perm;
60#define VHOST_IOTLB_MISS 1
61#define VHOST_IOTLB_UPDATE 2
62#define VHOST_IOTLB_INVALIDATE 3
63#define VHOST_IOTLB_ACCESS_FAIL 4
64 __u8 type;
65};
66
67#define VHOST_IOTLB_MSG 0x1
68#define VHOST_IOTLB_MSG_V2 0x2
69
70struct vhost_msg {
71 int type;
72 union {
73 struct vhost_iotlb_msg iotlb;
74 __u8 padding[64];
75 };
76};
77
78struct vhost_msg_v2 {
79 __u32 type;
80 __u32 reserved;
81 union {
82 struct vhost_iotlb_msg iotlb;
83 __u8 padding[64];
84 };
85};
86
87struct vhost_memory_region {
88 __u64 guest_phys_addr;
89 __u64 memory_size; /* bytes */
90 __u64 userspace_addr;
91 __u64 flags_padding; /* No flags are currently specified. */
92};
93
94/* All region addresses and sizes must be 4K aligned. */
95#define VHOST_PAGE_SIZE 0x1000
96
97struct vhost_memory {
98 __u32 nregions;
99 __u32 padding;
100 struct vhost_memory_region regions[0];
101};
102 17
103/* ioctls */ 18/* ioctls */
104 19
@@ -186,31 +101,7 @@ struct vhost_memory {
186 * device. This can be used to stop the ring (e.g. for migration). */ 101 * device. This can be used to stop the ring (e.g. for migration). */
187#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file) 102#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
188 103
189/* Feature bits */ 104/* VHOST_SCSI specific defines */
190/* Log all write descriptors. Can be changed while device is active. */
191#define VHOST_F_LOG_ALL 26
192/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
193#define VHOST_NET_F_VIRTIO_NET_HDR 27
194
195/* VHOST_SCSI specific definitions */
196
197/*
198 * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
199 *
200 * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
201 * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
202 * ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target.
203 * All the targets under vhost_wwpn can be seen and used by guset.
204 */
205
206#define VHOST_SCSI_ABI_VERSION 1
207
208struct vhost_scsi_target {
209 int abi_version;
210 char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */
211 unsigned short vhost_tpgt;
212 unsigned short reserved;
213};
214 105
215#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) 106#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
216#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) 107#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index f81e549ddfdb..4db74758c674 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -1,2 +1,3 @@
1libbpf_version.h 1libbpf_version.h
2FEATURE-DUMP.libbpf 2FEATURE-DUMP.libbpf
3test_libbpf
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
index 056f38310722..607aae40f4ed 100644
--- a/tools/lib/bpf/README.rst
+++ b/tools/lib/bpf/README.rst
@@ -132,6 +132,20 @@ For example, if current state of ``libbpf.map`` is:
132Format of version script and ways to handle ABI changes, including 132Format of version script and ways to handle ABI changes, including
133incompatible ones, described in details in [1]. 133incompatible ones, described in details in [1].
134 134
135Stand-alone build
136=================
137
138Under https://github.com/libbpf/libbpf there is a (semi-)automated
139mirror of the mainline's version of libbpf for a stand-alone build.
140
141However, all changes to libbpf's code base must be upstreamed through
142the mainline kernel tree.
143
144License
145=======
146
147libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause.
148
135Links 149Links
136===== 150=====
137 151
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 3caaa3428774..88cbd110ae58 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -65,6 +65,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
65 return syscall(__NR_bpf, cmd, attr, size); 65 return syscall(__NR_bpf, cmd, attr, size);
66} 66}
67 67
68static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
69{
70 int fd;
71
72 do {
73 fd = sys_bpf(BPF_PROG_LOAD, attr, size);
74 } while (fd < 0 && errno == EAGAIN);
75
76 return fd;
77}
78
68int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) 79int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
69{ 80{
70 __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; 81 __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
@@ -232,7 +243,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
232 memcpy(attr.prog_name, load_attr->name, 243 memcpy(attr.prog_name, load_attr->name,
233 min(name_len, BPF_OBJ_NAME_LEN - 1)); 244 min(name_len, BPF_OBJ_NAME_LEN - 1));
234 245
235 fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 246 fd = sys_bpf_prog_load(&attr, sizeof(attr));
236 if (fd >= 0) 247 if (fd >= 0)
237 return fd; 248 return fd;
238 249
@@ -269,7 +280,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
269 break; 280 break;
270 } 281 }
271 282
272 fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 283 fd = sys_bpf_prog_load(&attr, sizeof(attr));
273 284
274 if (fd >= 0) 285 if (fd >= 0)
275 goto done; 286 goto done;
@@ -283,7 +294,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
283 attr.log_size = log_buf_sz; 294 attr.log_size = log_buf_sz;
284 attr.log_level = 1; 295 attr.log_level = 1;
285 log_buf[0] = 0; 296 log_buf[0] = 0;
286 fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 297 fd = sys_bpf_prog_load(&attr, sizeof(attr));
287done: 298done:
288 free(finfo); 299 free(finfo);
289 free(linfo); 300 free(linfo);
@@ -328,7 +339,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
328 attr.kern_version = kern_version; 339 attr.kern_version = kern_version;
329 attr.prog_flags = prog_flags; 340 attr.prog_flags = prog_flags;
330 341
331 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 342 return sys_bpf_prog_load(&attr, sizeof(attr));
332} 343}
333 344
334int bpf_map_update_elem(int fd, const void *key, const void *value, 345int bpf_map_update_elem(int fd, const void *key, const void *value,
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
index 8b31c0e00ba3..d463761a58f4 100644
--- a/tools/lib/traceevent/event-parse-api.c
+++ b/tools/lib/traceevent/event-parse-api.c
@@ -194,13 +194,13 @@ void tep_set_page_size(struct tep_handle *pevent, int _page_size)
194} 194}
195 195
196/** 196/**
197 * tep_is_file_bigendian - get if the file is in big endian order 197 * tep_file_bigendian - get if the file is in big endian order
198 * @pevent: a handle to the tep_handle 198 * @pevent: a handle to the tep_handle
199 * 199 *
200 * This returns if the file is in big endian order 200 * This returns if the file is in big endian order
201 * If @pevent is NULL, 0 is returned. 201 * If @pevent is NULL, 0 is returned.
202 */ 202 */
203int tep_is_file_bigendian(struct tep_handle *pevent) 203int tep_file_bigendian(struct tep_handle *pevent)
204{ 204{
205 if(pevent) 205 if(pevent)
206 return pevent->file_bigendian; 206 return pevent->file_bigendian;
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
index 9a092dd4a86d..35833ee32d6c 100644
--- a/tools/lib/traceevent/event-parse-local.h
+++ b/tools/lib/traceevent/event-parse-local.h
@@ -7,7 +7,7 @@
7#ifndef _PARSE_EVENTS_INT_H 7#ifndef _PARSE_EVENTS_INT_H
8#define _PARSE_EVENTS_INT_H 8#define _PARSE_EVENTS_INT_H
9 9
10struct cmdline; 10struct tep_cmdline;
11struct cmdline_list; 11struct cmdline_list;
12struct func_map; 12struct func_map;
13struct func_list; 13struct func_list;
@@ -36,7 +36,7 @@ struct tep_handle {
36 int long_size; 36 int long_size;
37 int page_size; 37 int page_size;
38 38
39 struct cmdline *cmdlines; 39 struct tep_cmdline *cmdlines;
40 struct cmdline_list *cmdlist; 40 struct cmdline_list *cmdlist;
41 int cmdline_count; 41 int cmdline_count;
42 42
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 69a96e39f0ab..abd4fa5d3088 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -124,15 +124,15 @@ struct tep_print_arg *alloc_arg(void)
124 return calloc(1, sizeof(struct tep_print_arg)); 124 return calloc(1, sizeof(struct tep_print_arg));
125} 125}
126 126
127struct cmdline { 127struct tep_cmdline {
128 char *comm; 128 char *comm;
129 int pid; 129 int pid;
130}; 130};
131 131
132static int cmdline_cmp(const void *a, const void *b) 132static int cmdline_cmp(const void *a, const void *b)
133{ 133{
134 const struct cmdline *ca = a; 134 const struct tep_cmdline *ca = a;
135 const struct cmdline *cb = b; 135 const struct tep_cmdline *cb = b;
136 136
137 if (ca->pid < cb->pid) 137 if (ca->pid < cb->pid)
138 return -1; 138 return -1;
@@ -152,7 +152,7 @@ static int cmdline_init(struct tep_handle *pevent)
152{ 152{
153 struct cmdline_list *cmdlist = pevent->cmdlist; 153 struct cmdline_list *cmdlist = pevent->cmdlist;
154 struct cmdline_list *item; 154 struct cmdline_list *item;
155 struct cmdline *cmdlines; 155 struct tep_cmdline *cmdlines;
156 int i; 156 int i;
157 157
158 cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count); 158 cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count);
@@ -179,8 +179,8 @@ static int cmdline_init(struct tep_handle *pevent)
179 179
180static const char *find_cmdline(struct tep_handle *pevent, int pid) 180static const char *find_cmdline(struct tep_handle *pevent, int pid)
181{ 181{
182 const struct cmdline *comm; 182 const struct tep_cmdline *comm;
183 struct cmdline key; 183 struct tep_cmdline key;
184 184
185 if (!pid) 185 if (!pid)
186 return "<idle>"; 186 return "<idle>";
@@ -208,8 +208,8 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid)
208 */ 208 */
209int tep_pid_is_registered(struct tep_handle *pevent, int pid) 209int tep_pid_is_registered(struct tep_handle *pevent, int pid)
210{ 210{
211 const struct cmdline *comm; 211 const struct tep_cmdline *comm;
212 struct cmdline key; 212 struct tep_cmdline key;
213 213
214 if (!pid) 214 if (!pid)
215 return 1; 215 return 1;
@@ -232,11 +232,13 @@ int tep_pid_is_registered(struct tep_handle *pevent, int pid)
232 * we must add this pid. This is much slower than when cmdlines 232 * we must add this pid. This is much slower than when cmdlines
233 * are added before the array is initialized. 233 * are added before the array is initialized.
234 */ 234 */
235static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid) 235static int add_new_comm(struct tep_handle *pevent,
236 const char *comm, int pid, bool override)
236{ 237{
237 struct cmdline *cmdlines = pevent->cmdlines; 238 struct tep_cmdline *cmdlines = pevent->cmdlines;
238 const struct cmdline *cmdline; 239 struct tep_cmdline *cmdline;
239 struct cmdline key; 240 struct tep_cmdline key;
241 char *new_comm;
240 242
241 if (!pid) 243 if (!pid)
242 return 0; 244 return 0;
@@ -247,8 +249,19 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
247 cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count, 249 cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
248 sizeof(*pevent->cmdlines), cmdline_cmp); 250 sizeof(*pevent->cmdlines), cmdline_cmp);
249 if (cmdline) { 251 if (cmdline) {
250 errno = EEXIST; 252 if (!override) {
251 return -1; 253 errno = EEXIST;
254 return -1;
255 }
256 new_comm = strdup(comm);
257 if (!new_comm) {
258 errno = ENOMEM;
259 return -1;
260 }
261 free(cmdline->comm);
262 cmdline->comm = new_comm;
263
264 return 0;
252 } 265 }
253 266
254 cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1)); 267 cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1));
@@ -275,21 +288,13 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
275 return 0; 288 return 0;
276} 289}
277 290
278/** 291static int _tep_register_comm(struct tep_handle *pevent,
279 * tep_register_comm - register a pid / comm mapping 292 const char *comm, int pid, bool override)
280 * @pevent: handle for the pevent
281 * @comm: the command line to register
282 * @pid: the pid to map the command line to
283 *
284 * This adds a mapping to search for command line names with
285 * a given pid. The comm is duplicated.
286 */
287int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
288{ 293{
289 struct cmdline_list *item; 294 struct cmdline_list *item;
290 295
291 if (pevent->cmdlines) 296 if (pevent->cmdlines)
292 return add_new_comm(pevent, comm, pid); 297 return add_new_comm(pevent, comm, pid, override);
293 298
294 item = malloc(sizeof(*item)); 299 item = malloc(sizeof(*item));
295 if (!item) 300 if (!item)
@@ -312,6 +317,40 @@ int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
312 return 0; 317 return 0;
313} 318}
314 319
320/**
321 * tep_register_comm - register a pid / comm mapping
322 * @pevent: handle for the pevent
323 * @comm: the command line to register
324 * @pid: the pid to map the command line to
325 *
326 * This adds a mapping to search for command line names with
327 * a given pid. The comm is duplicated. If a command with the same pid
328 * already exist, -1 is returned and errno is set to EEXIST
329 */
330int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
331{
332 return _tep_register_comm(pevent, comm, pid, false);
333}
334
335/**
336 * tep_override_comm - register a pid / comm mapping
337 * @pevent: handle for the pevent
338 * @comm: the command line to register
339 * @pid: the pid to map the command line to
340 *
341 * This adds a mapping to search for command line names with
342 * a given pid. The comm is duplicated. If a command with the same pid
343 * already exist, the command string is udapted with the new one
344 */
345int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid)
346{
347 if (!pevent->cmdlines && cmdline_init(pevent)) {
348 errno = ENOMEM;
349 return -1;
350 }
351 return _tep_register_comm(pevent, comm, pid, true);
352}
353
315int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock) 354int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock)
316{ 355{
317 pevent->trace_clock = strdup(trace_clock); 356 pevent->trace_clock = strdup(trace_clock);
@@ -5227,18 +5266,6 @@ int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
5227} 5266}
5228 5267
5229/** 5268/**
5230 * tep_data_event_from_type - find the event by a given type
5231 * @pevent: a handle to the pevent
5232 * @type: the type of the event.
5233 *
5234 * This returns the event form a given @type;
5235 */
5236struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type)
5237{
5238 return tep_find_event(pevent, type);
5239}
5240
5241/**
5242 * tep_data_pid - parse the PID from record 5269 * tep_data_pid - parse the PID from record
5243 * @pevent: a handle to the pevent 5270 * @pevent: a handle to the pevent
5244 * @rec: the record to parse 5271 * @rec: the record to parse
@@ -5292,8 +5319,8 @@ const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid)
5292 return comm; 5319 return comm;
5293} 5320}
5294 5321
5295static struct cmdline * 5322static struct tep_cmdline *
5296pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *next) 5323pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next)
5297{ 5324{
5298 struct cmdline_list *cmdlist = (struct cmdline_list *)next; 5325 struct cmdline_list *cmdlist = (struct cmdline_list *)next;
5299 5326
@@ -5305,7 +5332,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne
5305 while (cmdlist && strcmp(cmdlist->comm, comm) != 0) 5332 while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
5306 cmdlist = cmdlist->next; 5333 cmdlist = cmdlist->next;
5307 5334
5308 return (struct cmdline *)cmdlist; 5335 return (struct tep_cmdline *)cmdlist;
5309} 5336}
5310 5337
5311/** 5338/**
@@ -5321,10 +5348,10 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne
5321 * next pid. 5348 * next pid.
5322 * Also, it does a linear search, so it may be slow. 5349 * Also, it does a linear search, so it may be slow.
5323 */ 5350 */
5324struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, 5351struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
5325 struct cmdline *next) 5352 struct tep_cmdline *next)
5326{ 5353{
5327 struct cmdline *cmdline; 5354 struct tep_cmdline *cmdline;
5328 5355
5329 /* 5356 /*
5330 * If the cmdlines have not been converted yet, then use 5357 * If the cmdlines have not been converted yet, then use
@@ -5363,7 +5390,7 @@ struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *co
5363 * Returns the pid for a give cmdline. If @cmdline is NULL, then 5390 * Returns the pid for a give cmdline. If @cmdline is NULL, then
5364 * -1 is returned. 5391 * -1 is returned.
5365 */ 5392 */
5366int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline) 5393int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
5367{ 5394{
5368 struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline; 5395 struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
5369 5396
@@ -6593,6 +6620,12 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
6593 * 6620 *
6594 * If @id is >= 0, then it is used to find the event. 6621 * If @id is >= 0, then it is used to find the event.
6595 * else @sys_name and @event_name are used. 6622 * else @sys_name and @event_name are used.
6623 *
6624 * Returns:
6625 * TEP_REGISTER_SUCCESS_OVERWRITE if an existing handler is overwritten
6626 * TEP_REGISTER_SUCCESS if a new handler is registered successfully
6627 * negative TEP_ERRNO_... in case of an error
6628 *
6596 */ 6629 */
6597int tep_register_event_handler(struct tep_handle *pevent, int id, 6630int tep_register_event_handler(struct tep_handle *pevent, int id,
6598 const char *sys_name, const char *event_name, 6631 const char *sys_name, const char *event_name,
@@ -6610,7 +6643,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
6610 6643
6611 event->handler = func; 6644 event->handler = func;
6612 event->context = context; 6645 event->context = context;
6613 return 0; 6646 return TEP_REGISTER_SUCCESS_OVERWRITE;
6614 6647
6615 not_found: 6648 not_found:
6616 /* Save for later use. */ 6649 /* Save for later use. */
@@ -6640,7 +6673,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
6640 pevent->handlers = handle; 6673 pevent->handlers = handle;
6641 handle->context = context; 6674 handle->context = context;
6642 6675
6643 return -1; 6676 return TEP_REGISTER_SUCCESS;
6644} 6677}
6645 6678
6646static int handle_matches(struct event_handler *handler, int id, 6679static int handle_matches(struct event_handler *handler, int id,
@@ -6723,8 +6756,10 @@ struct tep_handle *tep_alloc(void)
6723{ 6756{
6724 struct tep_handle *pevent = calloc(1, sizeof(*pevent)); 6757 struct tep_handle *pevent = calloc(1, sizeof(*pevent));
6725 6758
6726 if (pevent) 6759 if (pevent) {
6727 pevent->ref_count = 1; 6760 pevent->ref_count = 1;
6761 pevent->host_bigendian = tep_host_bigendian();
6762 }
6728 6763
6729 return pevent; 6764 return pevent;
6730} 6765}
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 35d37087d3c5..aec48f2aea8a 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -432,6 +432,7 @@ int tep_set_function_resolver(struct tep_handle *pevent,
432 tep_func_resolver_t *func, void *priv); 432 tep_func_resolver_t *func, void *priv);
433void tep_reset_function_resolver(struct tep_handle *pevent); 433void tep_reset_function_resolver(struct tep_handle *pevent);
434int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid); 434int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid);
435int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid);
435int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock); 436int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock);
436int tep_register_function(struct tep_handle *pevent, char *name, 437int tep_register_function(struct tep_handle *pevent, char *name,
437 unsigned long long addr, char *mod); 438 unsigned long long addr, char *mod);
@@ -484,6 +485,11 @@ int tep_print_func_field(struct trace_seq *s, const char *fmt,
484 struct tep_event *event, const char *name, 485 struct tep_event *event, const char *name,
485 struct tep_record *record, int err); 486 struct tep_record *record, int err);
486 487
488enum tep_reg_handler {
489 TEP_REGISTER_SUCCESS = 0,
490 TEP_REGISTER_SUCCESS_OVERWRITE,
491};
492
487int tep_register_event_handler(struct tep_handle *pevent, int id, 493int tep_register_event_handler(struct tep_handle *pevent, int id,
488 const char *sys_name, const char *event_name, 494 const char *sys_name, const char *event_name,
489 tep_event_handler_func func, void *context); 495 tep_event_handler_func func, void *context);
@@ -520,15 +526,14 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
520void tep_data_lat_fmt(struct tep_handle *pevent, 526void tep_data_lat_fmt(struct tep_handle *pevent,
521 struct trace_seq *s, struct tep_record *record); 527 struct trace_seq *s, struct tep_record *record);
522int tep_data_type(struct tep_handle *pevent, struct tep_record *rec); 528int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
523struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type);
524int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec); 529int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
525int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec); 530int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
526int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec); 531int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
527const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid); 532const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid);
528struct cmdline; 533struct tep_cmdline;
529struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, 534struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
530 struct cmdline *next); 535 struct tep_cmdline *next);
531int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline); 536int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline);
532 537
533void tep_print_field(struct trace_seq *s, void *data, 538void tep_print_field(struct trace_seq *s, void *data,
534 struct tep_format_field *field); 539 struct tep_format_field *field);
@@ -553,7 +558,7 @@ int tep_get_long_size(struct tep_handle *pevent);
553void tep_set_long_size(struct tep_handle *pevent, int long_size); 558void tep_set_long_size(struct tep_handle *pevent, int long_size);
554int tep_get_page_size(struct tep_handle *pevent); 559int tep_get_page_size(struct tep_handle *pevent);
555void tep_set_page_size(struct tep_handle *pevent, int _page_size); 560void tep_set_page_size(struct tep_handle *pevent, int _page_size);
556int tep_is_file_bigendian(struct tep_handle *pevent); 561int tep_file_bigendian(struct tep_handle *pevent);
557void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian); 562void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian);
558int tep_is_host_bigendian(struct tep_handle *pevent); 563int tep_is_host_bigendian(struct tep_handle *pevent);
559void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian); 564void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian);
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index 754050eea467..64b9c25a1fd3 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -389,7 +389,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
389 * We can only use the structure if file is of the same 389 * We can only use the structure if file is of the same
390 * endianness. 390 * endianness.
391 */ 391 */
392 if (tep_is_file_bigendian(event->pevent) == 392 if (tep_file_bigendian(event->pevent) ==
393 tep_is_host_bigendian(event->pevent)) { 393 tep_is_host_bigendian(event->pevent)) {
394 394
395 trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s", 395 trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
index 8ff1d55954d1..8d5ecd2bf877 100644
--- a/tools/lib/traceevent/trace-seq.c
+++ b/tools/lib/traceevent/trace-seq.c
@@ -100,7 +100,8 @@ static void expand_buffer(struct trace_seq *s)
100 * @fmt: printf format string 100 * @fmt: printf format string
101 * 101 *
102 * It returns 0 if the trace oversizes the buffer's free 102 * It returns 0 if the trace oversizes the buffer's free
103 * space, 1 otherwise. 103 * space, the number of characters printed, or a negative
104 * value in case of an error.
104 * 105 *
105 * The tracer may use either sequence operations or its own 106 * The tracer may use either sequence operations or its own
106 * copy to user routines. To simplify formating of a trace 107 * copy to user routines. To simplify formating of a trace
@@ -129,9 +130,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
129 goto try_again; 130 goto try_again;
130 } 131 }
131 132
132 s->len += ret; 133 if (ret > 0)
134 s->len += ret;
133 135
134 return 1; 136 return ret;
135} 137}
136 138
137/** 139/**
@@ -139,6 +141,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
139 * @s: trace sequence descriptor 141 * @s: trace sequence descriptor
140 * @fmt: printf format string 142 * @fmt: printf format string
141 * 143 *
144 * It returns 0 if the trace oversizes the buffer's free
145 * space, the number of characters printed, or a negative
146 * value in case of an error.
147 * *
142 * The tracer may use either sequence operations or its own 148 * The tracer may use either sequence operations or its own
143 * copy to user routines. To simplify formating of a trace 149 * copy to user routines. To simplify formating of a trace
144 * trace_seq_printf is used to store strings into a special 150 * trace_seq_printf is used to store strings into a special
@@ -163,9 +169,10 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
163 goto try_again; 169 goto try_again;
164 } 170 }
165 171
166 s->len += ret; 172 if (ret > 0)
173 s->len += ret;
167 174
168 return len; 175 return ret;
169} 176}
170 177
171/** 178/**
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index 095aebdc5bb7..e6150f21267d 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -19,8 +19,11 @@ C2C stands for Cache To Cache.
19The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows 19The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows
20you to track down the cacheline contentions. 20you to track down the cacheline contentions.
21 21
22The tool is based on x86's load latency and precise store facility events 22On x86, the tool is based on load latency and precise store facility events
23provided by Intel CPUs. These events provide: 23provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling
24with thresholding feature.
25
26These events provide:
24 - memory address of the access 27 - memory address of the access
25 - type of the access (load and store details) 28 - type of the access (load and store details)
26 - latency (in cycles) of the load access 29 - latency (in cycles) of the load access
@@ -46,7 +49,7 @@ RECORD OPTIONS
46 49
47-l:: 50-l::
48--ldlat:: 51--ldlat::
49 Configure mem-loads latency. 52 Configure mem-loads latency. (x86 only)
50 53
51-k:: 54-k::
52--all-kernel:: 55--all-kernel::
@@ -119,11 +122,16 @@ Following perf record options are configured by default:
119 -W,-d,--phys-data,--sample-cpu 122 -W,-d,--phys-data,--sample-cpu
120 123
121Unless specified otherwise with '-e' option, following events are monitored by 124Unless specified otherwise with '-e' option, following events are monitored by
122default: 125default on x86:
123 126
124 cpu/mem-loads,ldlat=30/P 127 cpu/mem-loads,ldlat=30/P
125 cpu/mem-stores/P 128 cpu/mem-stores/P
126 129
130and following on PowerPC:
131
132 cpu/mem-loads/
133 cpu/mem-stores/
134
127User can pass any 'perf record' option behind '--' mark, like (to enable 135User can pass any 'perf record' option behind '--' mark, like (to enable
128callchains and system wide monitoring): 136callchains and system wide monitoring):
129 137
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index f8d2167cf3e7..199ea0f0a6c0 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -82,7 +82,7 @@ RECORD OPTIONS
82 Be more verbose (show counter open errors, etc) 82 Be more verbose (show counter open errors, etc)
83 83
84--ldlat <n>:: 84--ldlat <n>::
85 Specify desired latency for loads event. 85 Specify desired latency for loads event. (x86 only)
86 86
87In addition, for report all perf report options are valid, and for record 87In addition, for report all perf report options are valid, and for record
88all perf record options. 88all perf record options.
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index ff29c3372ec3..0ee6795d82cc 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -524,12 +524,14 @@ $(arch_errno_name_array): $(arch_errno_tbl)
524 524
525all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) 525all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
526 526
527# Create python binding output directory if not already present
528_dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python')
529
527$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST) 530$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
528 $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \ 531 $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
529 CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \ 532 CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
530 $(PYTHON_WORD) util/setup.py \ 533 $(PYTHON_WORD) util/setup.py \
531 --quiet build_ext; \ 534 --quiet build_ext; \
532 mkdir -p $(OUTPUT)python && \
533 cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/ 535 cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/
534 536
535please_set_SHELL_PATH_to_a_more_modern_shell: 537please_set_SHELL_PATH_to_a_more_modern_shell:
@@ -660,12 +662,12 @@ $(OUTPUT)perf-%: %.o $(PERFLIBS)
660 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS) 662 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS)
661 663
662ifndef NO_PERF_READ_VDSO32 664ifndef NO_PERF_READ_VDSO32
663$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-vdso-map.c 665$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-map.c
664 $(QUIET_CC)$(CC) -m32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c 666 $(QUIET_CC)$(CC) -m32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
665endif 667endif
666 668
667ifndef NO_PERF_READ_VDSOX32 669ifndef NO_PERF_READ_VDSOX32
668$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-vdso-map.c 670$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-map.c
669 $(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c 671 $(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
670endif 672endif
671 673
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build
index 883c57ff0c08..d9ae2733f9cc 100644
--- a/tools/perf/arch/arm/tests/Build
+++ b/tools/perf/arch/arm/tests/Build
@@ -1,4 +1,5 @@
1libperf-y += regs_load.o 1libperf-y += regs_load.o
2libperf-y += dwarf-unwind.o 2libperf-y += dwarf-unwind.o
3libperf-y += vectors-page.o
3 4
4libperf-y += arch-tests.o 5libperf-y += arch-tests.o
diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c
index 5b1543c98022..6848101a855f 100644
--- a/tools/perf/arch/arm/tests/arch-tests.c
+++ b/tools/perf/arch/arm/tests/arch-tests.c
@@ -11,6 +11,10 @@ struct test arch_tests[] = {
11 }, 11 },
12#endif 12#endif
13 { 13 {
14 .desc = "Vectors page",
15 .func = test__vectors_page,
16 },
17 {
14 .func = NULL, 18 .func = NULL,
15 }, 19 },
16}; 20};
diff --git a/tools/perf/arch/arm/tests/vectors-page.c b/tools/perf/arch/arm/tests/vectors-page.c
new file mode 100644
index 000000000000..7ffdd79971c8
--- /dev/null
+++ b/tools/perf/arch/arm/tests/vectors-page.c
@@ -0,0 +1,24 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <stdio.h>
3#include <string.h>
4#include <linux/compiler.h>
5
6#include "debug.h"
7#include "tests/tests.h"
8#include "util/find-map.c"
9
10#define VECTORS__MAP_NAME "[vectors]"
11
12int test__vectors_page(struct test *test __maybe_unused,
13 int subtest __maybe_unused)
14{
15 void *start, *end;
16
17 if (find_map(&start, &end, VECTORS__MAP_NAME)) {
18 pr_err("%s not found, is CONFIG_KUSER_HELPERS enabled?\n",
19 VECTORS__MAP_NAME);
20 return TEST_FAIL;
21 }
22
23 return TEST_OK;
24}
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index a111239df182..e58d00d62f02 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -14,18 +14,25 @@ PERF_HAVE_JITDUMP := 1
14out := $(OUTPUT)arch/powerpc/include/generated/asm 14out := $(OUTPUT)arch/powerpc/include/generated/asm
15header32 := $(out)/syscalls_32.c 15header32 := $(out)/syscalls_32.c
16header64 := $(out)/syscalls_64.c 16header64 := $(out)/syscalls_64.c
17sysdef := $(srctree)/tools/arch/powerpc/include/uapi/asm/unistd.h 17syskrn := $(srctree)/arch/powerpc/kernel/syscalls/syscall.tbl
18sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls/ 18sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls
19sysdef := $(sysprf)/syscall.tbl
19systbl := $(sysprf)/mksyscalltbl 20systbl := $(sysprf)/mksyscalltbl
20 21
21# Create output directory if not already present 22# Create output directory if not already present
22_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') 23_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
23 24
24$(header64): $(sysdef) $(systbl) 25$(header64): $(sysdef) $(systbl)
25 $(Q)$(SHELL) '$(systbl)' '64' '$(CC)' $(sysdef) > $@ 26 @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
27 (diff -B $(sysdef) $(syskrn) >/dev/null) \
28 || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
29 $(Q)$(SHELL) '$(systbl)' '64' $(sysdef) > $@
26 30
27$(header32): $(sysdef) $(systbl) 31$(header32): $(sysdef) $(systbl)
28 $(Q)$(SHELL) '$(systbl)' '32' '$(CC)' $(sysdef) > $@ 32 @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
33 (diff -B $(sysdef) $(syskrn) >/dev/null) \
34 || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
35 $(Q)$(SHELL) '$(systbl)' '32' $(sysdef) > $@
29 36
30clean:: 37clean::
31 $(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64) 38 $(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64)
diff --git a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
index ef52e1dd694b..6c58060aa03b 100755
--- a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
@@ -9,10 +9,9 @@
9# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com> 9# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
10 10
11wordsize=$1 11wordsize=$1
12gcc=$2 12SYSCALL_TBL=$2
13input=$3
14 13
15if ! test -r $input; then 14if ! test -r $SYSCALL_TBL; then
16 echo "Could not read input file" >&2 15 echo "Could not read input file" >&2
17 exit 1 16 exit 1
18fi 17fi
@@ -20,18 +19,21 @@ fi
20create_table() 19create_table()
21{ 20{
22 local wordsize=$1 21 local wordsize=$1
23 local max_nr 22 local max_nr nr abi sc discard
23 max_nr=-1
24 nr=0
24 25
25 echo "static const char *syscalltbl_powerpc_${wordsize}[] = {" 26 echo "static const char *syscalltbl_powerpc_${wordsize}[] = {"
26 while read sc nr; do 27 while read nr abi sc discard; do
27 printf '\t[%d] = "%s",\n' $nr $sc 28 if [ "$max_nr" -lt "$nr" ]; then
28 max_nr=$nr 29 printf '\t[%d] = "%s",\n' $nr $sc
30 max_nr=$nr
31 fi
29 done 32 done
30 echo '};' 33 echo '};'
31 echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr" 34 echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr"
32} 35}
33 36
34$gcc -m${wordsize} -E -dM -x c $input \ 37grep -E "^[[:digit:]]+[[:space:]]+(common|spu|nospu|${wordsize})" $SYSCALL_TBL \
35 |sed -ne 's/^#define __NR_//p' \ 38 |sort -k1 -n \
36 |sort -t' ' -k2 -nu \
37 |create_table ${wordsize} 39 |create_table ${wordsize}
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..db3bbb8744af
--- /dev/null
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -0,0 +1,427 @@
1# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2#
3# system call numbers and entry vectors for powerpc
4#
5# The format is:
6# <number> <abi> <name> <entry point> <compat entry point>
7#
8# The <abi> can be common, spu, nospu, 64, or 32 for this file.
9#
100 nospu restart_syscall sys_restart_syscall
111 nospu exit sys_exit
122 nospu fork ppc_fork
133 common read sys_read
144 common write sys_write
155 common open sys_open compat_sys_open
166 common close sys_close
177 common waitpid sys_waitpid
188 common creat sys_creat
199 common link sys_link
2010 common unlink sys_unlink
2111 nospu execve sys_execve compat_sys_execve
2212 common chdir sys_chdir
2313 common time sys_time compat_sys_time
2414 common mknod sys_mknod
2515 common chmod sys_chmod
2616 common lchown sys_lchown
2717 common break sys_ni_syscall
2818 32 oldstat sys_stat sys_ni_syscall
2918 64 oldstat sys_ni_syscall
3018 spu oldstat sys_ni_syscall
3119 common lseek sys_lseek compat_sys_lseek
3220 common getpid sys_getpid
3321 nospu mount sys_mount compat_sys_mount
3422 32 umount sys_oldumount
3522 64 umount sys_ni_syscall
3622 spu umount sys_ni_syscall
3723 common setuid sys_setuid
3824 common getuid sys_getuid
3925 common stime sys_stime compat_sys_stime
4026 nospu ptrace sys_ptrace compat_sys_ptrace
4127 common alarm sys_alarm
4228 32 oldfstat sys_fstat sys_ni_syscall
4328 64 oldfstat sys_ni_syscall
4428 spu oldfstat sys_ni_syscall
4529 nospu pause sys_pause
4630 nospu utime sys_utime compat_sys_utime
4731 common stty sys_ni_syscall
4832 common gtty sys_ni_syscall
4933 common access sys_access
5034 common nice sys_nice
5135 common ftime sys_ni_syscall
5236 common sync sys_sync
5337 common kill sys_kill
5438 common rename sys_rename
5539 common mkdir sys_mkdir
5640 common rmdir sys_rmdir
5741 common dup sys_dup
5842 common pipe sys_pipe
5943 common times sys_times compat_sys_times
6044 common prof sys_ni_syscall
6145 common brk sys_brk
6246 common setgid sys_setgid
6347 common getgid sys_getgid
6448 nospu signal sys_signal
6549 common geteuid sys_geteuid
6650 common getegid sys_getegid
6751 nospu acct sys_acct
6852 nospu umount2 sys_umount
6953 common lock sys_ni_syscall
7054 common ioctl sys_ioctl compat_sys_ioctl
7155 common fcntl sys_fcntl compat_sys_fcntl
7256 common mpx sys_ni_syscall
7357 common setpgid sys_setpgid
7458 common ulimit sys_ni_syscall
7559 32 oldolduname sys_olduname
7659 64 oldolduname sys_ni_syscall
7759 spu oldolduname sys_ni_syscall
7860 common umask sys_umask
7961 common chroot sys_chroot
8062 nospu ustat sys_ustat compat_sys_ustat
8163 common dup2 sys_dup2
8264 common getppid sys_getppid
8365 common getpgrp sys_getpgrp
8466 common setsid sys_setsid
8567 32 sigaction sys_sigaction compat_sys_sigaction
8667 64 sigaction sys_ni_syscall
8767 spu sigaction sys_ni_syscall
8868 common sgetmask sys_sgetmask
8969 common ssetmask sys_ssetmask
9070 common setreuid sys_setreuid
9171 common setregid sys_setregid
9272 32 sigsuspend sys_sigsuspend
9372 64 sigsuspend sys_ni_syscall
9472 spu sigsuspend sys_ni_syscall
9573 32 sigpending sys_sigpending compat_sys_sigpending
9673 64 sigpending sys_ni_syscall
9773 spu sigpending sys_ni_syscall
9874 common sethostname sys_sethostname
9975 common setrlimit sys_setrlimit compat_sys_setrlimit
10076 32 getrlimit sys_old_getrlimit compat_sys_old_getrlimit
10176 64 getrlimit sys_ni_syscall
10276 spu getrlimit sys_ni_syscall
10377 common getrusage sys_getrusage compat_sys_getrusage
10478 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
10579 common settimeofday sys_settimeofday compat_sys_settimeofday
10680 common getgroups sys_getgroups
10781 common setgroups sys_setgroups
10882 32 select ppc_select sys_ni_syscall
10982 64 select sys_ni_syscall
11082 spu select sys_ni_syscall
11183 common symlink sys_symlink
11284 32 oldlstat sys_lstat sys_ni_syscall
11384 64 oldlstat sys_ni_syscall
11484 spu oldlstat sys_ni_syscall
11585 common readlink sys_readlink
11686 nospu uselib sys_uselib
11787 nospu swapon sys_swapon
11888 nospu reboot sys_reboot
11989 32 readdir sys_old_readdir compat_sys_old_readdir
12089 64 readdir sys_ni_syscall
12189 spu readdir sys_ni_syscall
12290 common mmap sys_mmap
12391 common munmap sys_munmap
12492 common truncate sys_truncate compat_sys_truncate
12593 common ftruncate sys_ftruncate compat_sys_ftruncate
12694 common fchmod sys_fchmod
12795 common fchown sys_fchown
12896 common getpriority sys_getpriority
12997 common setpriority sys_setpriority
13098 common profil sys_ni_syscall
13199 nospu statfs sys_statfs compat_sys_statfs
132100 nospu fstatfs sys_fstatfs compat_sys_fstatfs
133101 common ioperm sys_ni_syscall
134102 common socketcall sys_socketcall compat_sys_socketcall
135103 common syslog sys_syslog
136104 common setitimer sys_setitimer compat_sys_setitimer
137105 common getitimer sys_getitimer compat_sys_getitimer
138106 common stat sys_newstat compat_sys_newstat
139107 common lstat sys_newlstat compat_sys_newlstat
140108 common fstat sys_newfstat compat_sys_newfstat
141109 32 olduname sys_uname
142109 64 olduname sys_ni_syscall
143109 spu olduname sys_ni_syscall
144110 common iopl sys_ni_syscall
145111 common vhangup sys_vhangup
146112 common idle sys_ni_syscall
147113 common vm86 sys_ni_syscall
148114 common wait4 sys_wait4 compat_sys_wait4
149115 nospu swapoff sys_swapoff
150116 common sysinfo sys_sysinfo compat_sys_sysinfo
151117 nospu ipc sys_ipc compat_sys_ipc
152118 common fsync sys_fsync
153119 32 sigreturn sys_sigreturn compat_sys_sigreturn
154119 64 sigreturn sys_ni_syscall
155119 spu sigreturn sys_ni_syscall
156120 nospu clone ppc_clone
157121 common setdomainname sys_setdomainname
158122 common uname sys_newuname
159123 common modify_ldt sys_ni_syscall
160124 common adjtimex sys_adjtimex compat_sys_adjtimex
161125 common mprotect sys_mprotect
162126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
163126 64 sigprocmask sys_ni_syscall
164126 spu sigprocmask sys_ni_syscall
165127 common create_module sys_ni_syscall
166128 nospu init_module sys_init_module
167129 nospu delete_module sys_delete_module
168130 common get_kernel_syms sys_ni_syscall
169131 nospu quotactl sys_quotactl
170132 common getpgid sys_getpgid
171133 common fchdir sys_fchdir
172134 common bdflush sys_bdflush
173135 common sysfs sys_sysfs
174136 32 personality sys_personality ppc64_personality
175136 64 personality ppc64_personality
176136 spu personality ppc64_personality
177137 common afs_syscall sys_ni_syscall
178138 common setfsuid sys_setfsuid
179139 common setfsgid sys_setfsgid
180140 common _llseek sys_llseek
181141 common getdents sys_getdents compat_sys_getdents
182142 common _newselect sys_select compat_sys_select
183143 common flock sys_flock
184144 common msync sys_msync
185145 common readv sys_readv compat_sys_readv
186146 common writev sys_writev compat_sys_writev
187147 common getsid sys_getsid
188148 common fdatasync sys_fdatasync
189149 nospu _sysctl sys_sysctl compat_sys_sysctl
190150 common mlock sys_mlock
191151 common munlock sys_munlock
192152 common mlockall sys_mlockall
193153 common munlockall sys_munlockall
194154 common sched_setparam sys_sched_setparam
195155 common sched_getparam sys_sched_getparam
196156 common sched_setscheduler sys_sched_setscheduler
197157 common sched_getscheduler sys_sched_getscheduler
198158 common sched_yield sys_sched_yield
199159 common sched_get_priority_max sys_sched_get_priority_max
200160 common sched_get_priority_min sys_sched_get_priority_min
201161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
202162 common nanosleep sys_nanosleep compat_sys_nanosleep
203163 common mremap sys_mremap
204164 common setresuid sys_setresuid
205165 common getresuid sys_getresuid
206166 common query_module sys_ni_syscall
207167 common poll sys_poll
208168 common nfsservctl sys_ni_syscall
209169 common setresgid sys_setresgid
210170 common getresgid sys_getresgid
211171 common prctl sys_prctl
212172 nospu rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
213173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
214174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
215175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
216176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
217177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
218178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
219179 common pread64 sys_pread64 compat_sys_pread64
220180 common pwrite64 sys_pwrite64 compat_sys_pwrite64
221181 common chown sys_chown
222182 common getcwd sys_getcwd
223183 common capget sys_capget
224184 common capset sys_capset
225185 nospu sigaltstack sys_sigaltstack compat_sys_sigaltstack
226186 32 sendfile sys_sendfile compat_sys_sendfile
227186 64 sendfile sys_sendfile64
228186 spu sendfile sys_sendfile64
229187 common getpmsg sys_ni_syscall
230188 common putpmsg sys_ni_syscall
231189 nospu vfork ppc_vfork
232190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
233191 common readahead sys_readahead compat_sys_readahead
234192 32 mmap2 sys_mmap2 compat_sys_mmap2
235193 32 truncate64 sys_truncate64 compat_sys_truncate64
236194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
237195 32 stat64 sys_stat64
238196 32 lstat64 sys_lstat64
239197 32 fstat64 sys_fstat64
240198 nospu pciconfig_read sys_pciconfig_read
241199 nospu pciconfig_write sys_pciconfig_write
242200 nospu pciconfig_iobase sys_pciconfig_iobase
243201 common multiplexer sys_ni_syscall
244202 common getdents64 sys_getdents64
245203 common pivot_root sys_pivot_root
246204 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
247205 common madvise sys_madvise
248206 common mincore sys_mincore
249207 common gettid sys_gettid
250208 common tkill sys_tkill
251209 common setxattr sys_setxattr
252210 common lsetxattr sys_lsetxattr
253211 common fsetxattr sys_fsetxattr
254212 common getxattr sys_getxattr
255213 common lgetxattr sys_lgetxattr
256214 common fgetxattr sys_fgetxattr
257215 common listxattr sys_listxattr
258216 common llistxattr sys_llistxattr
259217 common flistxattr sys_flistxattr
260218 common removexattr sys_removexattr
261219 common lremovexattr sys_lremovexattr
262220 common fremovexattr sys_fremovexattr
263221 common futex sys_futex compat_sys_futex
264222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
265223 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
266# 224 unused
267225 common tuxcall sys_ni_syscall
268226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64
269227 common io_setup sys_io_setup compat_sys_io_setup
270228 common io_destroy sys_io_destroy
271229 common io_getevents sys_io_getevents compat_sys_io_getevents
272230 common io_submit sys_io_submit compat_sys_io_submit
273231 common io_cancel sys_io_cancel
274232 nospu set_tid_address sys_set_tid_address
275233 common fadvise64 sys_fadvise64 ppc32_fadvise64
276234 nospu exit_group sys_exit_group
277235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
278236 common epoll_create sys_epoll_create
279237 common epoll_ctl sys_epoll_ctl
280238 common epoll_wait sys_epoll_wait
281239 common remap_file_pages sys_remap_file_pages
282240 common timer_create sys_timer_create compat_sys_timer_create
283241 common timer_settime sys_timer_settime compat_sys_timer_settime
284242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
285243 common timer_getoverrun sys_timer_getoverrun
286244 common timer_delete sys_timer_delete
287245 common clock_settime sys_clock_settime compat_sys_clock_settime
288246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
289247 common clock_getres sys_clock_getres compat_sys_clock_getres
290248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
291249 32 swapcontext ppc_swapcontext ppc32_swapcontext
292249 64 swapcontext ppc64_swapcontext
293249 spu swapcontext sys_ni_syscall
294250 common tgkill sys_tgkill
295251 common utimes sys_utimes compat_sys_utimes
296252 common statfs64 sys_statfs64 compat_sys_statfs64
297253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
298254 32 fadvise64_64 ppc_fadvise64_64
299254 spu fadvise64_64 sys_ni_syscall
300255 common rtas sys_rtas
301256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall
302256 64 sys_debug_setcontext sys_ni_syscall
303256 spu sys_debug_setcontext sys_ni_syscall
304# 257 reserved for vserver
305258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
306259 nospu mbind sys_mbind compat_sys_mbind
307260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
308261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
309262 nospu mq_open sys_mq_open compat_sys_mq_open
310263 nospu mq_unlink sys_mq_unlink
311264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
312265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
313266 nospu mq_notify sys_mq_notify compat_sys_mq_notify
314267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
315268 nospu kexec_load sys_kexec_load compat_sys_kexec_load
316269 nospu add_key sys_add_key
317270 nospu request_key sys_request_key
318271 nospu keyctl sys_keyctl compat_sys_keyctl
319272 nospu waitid sys_waitid compat_sys_waitid
320273 nospu ioprio_set sys_ioprio_set
321274 nospu ioprio_get sys_ioprio_get
322275 nospu inotify_init sys_inotify_init
323276 nospu inotify_add_watch sys_inotify_add_watch
324277 nospu inotify_rm_watch sys_inotify_rm_watch
325278 nospu spu_run sys_spu_run
326279 nospu spu_create sys_spu_create
327280 nospu pselect6 sys_pselect6 compat_sys_pselect6
328281 nospu ppoll sys_ppoll compat_sys_ppoll
329282 common unshare sys_unshare
330283 common splice sys_splice
331284 common tee sys_tee
332285 common vmsplice sys_vmsplice compat_sys_vmsplice
333286 common openat sys_openat compat_sys_openat
334287 common mkdirat sys_mkdirat
335288 common mknodat sys_mknodat
336289 common fchownat sys_fchownat
337290 common futimesat sys_futimesat compat_sys_futimesat
338291 32 fstatat64 sys_fstatat64
339291 64 newfstatat sys_newfstatat
340291 spu newfstatat sys_newfstatat
341292 common unlinkat sys_unlinkat
342293 common renameat sys_renameat
343294 common linkat sys_linkat
344295 common symlinkat sys_symlinkat
345296 common readlinkat sys_readlinkat
346297 common fchmodat sys_fchmodat
347298 common faccessat sys_faccessat
348299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
349300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
350301 common move_pages sys_move_pages compat_sys_move_pages
351302 common getcpu sys_getcpu
352303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
353304 common utimensat sys_utimensat compat_sys_utimensat
354305 common signalfd sys_signalfd compat_sys_signalfd
355306 common timerfd_create sys_timerfd_create
356307 common eventfd sys_eventfd
357308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2
358309 nospu fallocate sys_fallocate compat_sys_fallocate
359310 nospu subpage_prot sys_subpage_prot
360311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
361312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
362313 common signalfd4 sys_signalfd4 compat_sys_signalfd4
363314 common eventfd2 sys_eventfd2
364315 common epoll_create1 sys_epoll_create1
365316 common dup3 sys_dup3
366317 common pipe2 sys_pipe2
367318 nospu inotify_init1 sys_inotify_init1
368319 common perf_event_open sys_perf_event_open
369320 common preadv sys_preadv compat_sys_preadv
370321 common pwritev sys_pwritev compat_sys_pwritev
371322 nospu rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
372323 nospu fanotify_init sys_fanotify_init
373324 nospu fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
374325 common prlimit64 sys_prlimit64
375326 common socket sys_socket
376327 common bind sys_bind
377328 common connect sys_connect
378329 common listen sys_listen
379330 common accept sys_accept
380331 common getsockname sys_getsockname
381332 common getpeername sys_getpeername
382333 common socketpair sys_socketpair
383334 common send sys_send
384335 common sendto sys_sendto
385336 common recv sys_recv compat_sys_recv
386337 common recvfrom sys_recvfrom compat_sys_recvfrom
387338 common shutdown sys_shutdown
388339 common setsockopt sys_setsockopt compat_sys_setsockopt
389340 common getsockopt sys_getsockopt compat_sys_getsockopt
390341 common sendmsg sys_sendmsg compat_sys_sendmsg
391342 common recvmsg sys_recvmsg compat_sys_recvmsg
392343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
393344 common accept4 sys_accept4
394345 common name_to_handle_at sys_name_to_handle_at
395346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
396347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
397348 common syncfs sys_syncfs
398349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
399350 common setns sys_setns
400351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
401352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
402353 nospu finit_module sys_finit_module
403354 nospu kcmp sys_kcmp
404355 common sched_setattr sys_sched_setattr
405356 common sched_getattr sys_sched_getattr
406357 common renameat2 sys_renameat2
407358 common seccomp sys_seccomp
408359 common getrandom sys_getrandom
409360 common memfd_create sys_memfd_create
410361 common bpf sys_bpf
411362 nospu execveat sys_execveat compat_sys_execveat
412363 32 switch_endian sys_ni_syscall
413363 64 switch_endian ppc_switch_endian
414363 spu switch_endian sys_ni_syscall
415364 common userfaultfd sys_userfaultfd
416365 common membarrier sys_membarrier
417378 nospu mlock2 sys_mlock2
418379 nospu copy_file_range sys_copy_file_range
419380 common preadv2 sys_preadv2 compat_sys_preadv2
420381 common pwritev2 sys_pwritev2 compat_sys_pwritev2
421382 nospu kexec_file_load sys_kexec_file_load
422383 nospu statx sys_statx
423384 nospu pkey_alloc sys_pkey_alloc
424385 nospu pkey_free sys_pkey_free
425386 nospu pkey_mprotect sys_pkey_mprotect
426387 nospu rseq sys_rseq
427388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h
index 1076393e6f43..e18a3556f5e3 100644
--- a/tools/perf/arch/powerpc/include/perf_regs.h
+++ b/tools/perf/arch/powerpc/include/perf_regs.h
@@ -63,7 +63,8 @@ static const char *reg_names[] = {
63 [PERF_REG_POWERPC_TRAP] = "trap", 63 [PERF_REG_POWERPC_TRAP] = "trap",
64 [PERF_REG_POWERPC_DAR] = "dar", 64 [PERF_REG_POWERPC_DAR] = "dar",
65 [PERF_REG_POWERPC_DSISR] = "dsisr", 65 [PERF_REG_POWERPC_DSISR] = "dsisr",
66 [PERF_REG_POWERPC_SIER] = "sier" 66 [PERF_REG_POWERPC_SIER] = "sier",
67 [PERF_REG_POWERPC_MMCRA] = "mmcra"
67}; 68};
68 69
69static inline const char *perf_reg_name(int id) 70static inline const char *perf_reg_name(int id)
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index 2e6595310420..ba98bd006488 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -2,6 +2,7 @@ libperf-y += header.o
2libperf-y += sym-handling.o 2libperf-y += sym-handling.o
3libperf-y += kvm-stat.o 3libperf-y += kvm-stat.o
4libperf-y += perf_regs.o 4libperf-y += perf_regs.o
5libperf-y += mem-events.o
5 6
6libperf-$(CONFIG_DWARF) += dwarf-regs.o 7libperf-$(CONFIG_DWARF) += dwarf-regs.o
7libperf-$(CONFIG_DWARF) += skip-callchain-idx.o 8libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
new file mode 100644
index 000000000000..d08311f04e95
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/mem-events.c
@@ -0,0 +1,11 @@
1// SPDX-License-Identifier: GPL-2.0
2#include "mem-events.h"
3
4/* PowerPC does not support 'ldlat' parameter. */
5char *perf_mem_events__name(int i)
6{
7 if (i == PERF_MEM_EVENTS__LOAD)
8 return (char *) "cpu/mem-loads/";
9
10 return (char *) "cpu/mem-stores/";
11}
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index 07fcd977d93e..34d5134681d9 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -53,6 +53,7 @@ const struct sample_reg sample_reg_masks[] = {
53 SMPL_REG(dar, PERF_REG_POWERPC_DAR), 53 SMPL_REG(dar, PERF_REG_POWERPC_DAR),
54 SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), 54 SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
55 SMPL_REG(sier, PERF_REG_POWERPC_SIER), 55 SMPL_REG(sier, PERF_REG_POWERPC_SIER),
56 SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA),
56 SMPL_REG_END 57 SMPL_REG_END
57}; 58};
58 59
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d079f36d342d..ac221f137ed2 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1681,13 +1681,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
1681 .force_header = false, 1681 .force_header = false,
1682 }; 1682 };
1683 struct perf_evsel *ev2; 1683 struct perf_evsel *ev2;
1684 static bool init;
1685 u64 val; 1684 u64 val;
1686 1685
1687 if (!init) {
1688 perf_stat__init_shadow_stats();
1689 init = true;
1690 }
1691 if (!evsel->stats) 1686 if (!evsel->stats)
1692 perf_evlist__alloc_stats(script->session->evlist, false); 1687 perf_evlist__alloc_stats(script->session->evlist, false);
1693 if (evsel_script(evsel->leader)->gnum++ == 0) 1688 if (evsel_script(evsel->leader)->gnum++ == 0)
@@ -1794,7 +1789,7 @@ static void process_event(struct perf_script *script,
1794 return; 1789 return;
1795 } 1790 }
1796 1791
1797 if (PRINT_FIELD(TRACE)) { 1792 if (PRINT_FIELD(TRACE) && sample->raw_data) {
1798 event_format__fprintf(evsel->tp_format, sample->cpu, 1793 event_format__fprintf(evsel->tp_format, sample->cpu,
1799 sample->raw_data, sample->raw_size, fp); 1794 sample->raw_data, sample->raw_size, fp);
1800 } 1795 }
@@ -2359,6 +2354,8 @@ static int __cmd_script(struct perf_script *script)
2359 2354
2360 signal(SIGINT, sig_handler); 2355 signal(SIGINT, sig_handler);
2361 2356
2357 perf_stat__init_shadow_stats();
2358
2362 /* override event processing functions */ 2359 /* override event processing functions */
2363 if (script->show_task_events) { 2360 if (script->show_task_events) {
2364 script->tool.comm = process_comm_event; 2361 script->tool.comm = process_comm_event;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 1410d66192f7..63a3afc7f32b 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -561,7 +561,8 @@ try_again:
561 break; 561 break;
562 } 562 }
563 } 563 }
564 wait4(child_pid, &status, 0, &stat_config.ru_data); 564 if (child_pid != -1)
565 wait4(child_pid, &status, 0, &stat_config.ru_data);
565 566
566 if (workload_exec_errno) { 567 if (workload_exec_errno) {
567 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 568 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fe3ecfb2e64b..f64e312db787 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1028,12 +1028,7 @@ out_err:
1028 1028
1029static int callchain_param__setup_sample_type(struct callchain_param *callchain) 1029static int callchain_param__setup_sample_type(struct callchain_param *callchain)
1030{ 1030{
1031 if (!perf_hpp_list.sym) { 1031 if (callchain->mode != CHAIN_NONE) {
1032 if (callchain->enabled) {
1033 ui__error("Selected -g but \"sym\" not present in --sort/-s.");
1034 return -EINVAL;
1035 }
1036 } else if (callchain->mode != CHAIN_NONE) {
1037 if (callchain_register_param(callchain) < 0) { 1032 if (callchain_register_param(callchain) < 0) {
1038 ui__error("Can't register callchain params.\n"); 1033 ui__error("Can't register callchain params.\n");
1039 return -EINVAL; 1034 return -EINVAL;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index adbf28183560..b36061cd1ab8 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1758,6 +1758,7 @@ static int trace__printf_interrupted_entry(struct trace *trace)
1758{ 1758{
1759 struct thread_trace *ttrace; 1759 struct thread_trace *ttrace;
1760 size_t printed; 1760 size_t printed;
1761 int len;
1761 1762
1762 if (trace->failure_only || trace->current == NULL) 1763 if (trace->failure_only || trace->current == NULL)
1763 return 0; 1764 return 0;
@@ -1768,9 +1769,14 @@ static int trace__printf_interrupted_entry(struct trace *trace)
1768 return 0; 1769 return 0;
1769 1770
1770 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 1771 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
1771 printed += fprintf(trace->output, ")%-*s ...\n", trace->args_alignment, ttrace->entry_str); 1772 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
1772 ttrace->entry_pending = false; 1773
1774 if (len < trace->args_alignment - 4)
1775 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
1776
1777 printed += fprintf(trace->output, " ...\n");
1773 1778
1779 ttrace->entry_pending = false;
1774 ++trace->nr_events_printed; 1780 ++trace->nr_events_printed;
1775 1781
1776 return printed; 1782 return printed;
@@ -2026,9 +2032,10 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
2026 if (ttrace->entry_pending) { 2032 if (ttrace->entry_pending) {
2027 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2033 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2028 } else { 2034 } else {
2029 fprintf(trace->output, " ... ["); 2035 printed += fprintf(trace->output, " ... [");
2030 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2036 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2031 fprintf(trace->output, "]: %s()", sc->name); 2037 printed += 9;
2038 printed += fprintf(trace->output, "]: %s()", sc->name);
2032 } 2039 }
2033 2040
2034 printed++; /* the closing ')' */ 2041 printed++; /* the closing ')' */
@@ -2507,19 +2514,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2507 2514
2508static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist) 2515static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
2509{ 2516{
2510 struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname"); 2517 bool found = false;
2518 struct perf_evsel *evsel, *tmp;
2519 struct parse_events_error err = { .idx = 0, };
2520 int ret = parse_events(evlist, "probe:vfs_getname*", &err);
2511 2521
2512 if (IS_ERR(evsel)) 2522 if (ret)
2513 return false; 2523 return false;
2514 2524
2515 if (perf_evsel__field(evsel, "pathname") == NULL) { 2525 evlist__for_each_entry_safe(evlist, evsel, tmp) {
2526 if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
2527 continue;
2528
2529 if (perf_evsel__field(evsel, "pathname")) {
2530 evsel->handler = trace__vfs_getname;
2531 found = true;
2532 continue;
2533 }
2534
2535 list_del_init(&evsel->node);
2536 evsel->evlist = NULL;
2516 perf_evsel__delete(evsel); 2537 perf_evsel__delete(evsel);
2517 return false;
2518 } 2538 }
2519 2539
2520 evsel->handler = trace__vfs_getname; 2540 return found;
2521 perf_evlist__add(evlist, evsel);
2522 return true;
2523} 2541}
2524 2542
2525static struct perf_evsel *perf_evsel__new_pgfault(u64 config) 2543static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 6cb98f8570a2..7b55613924de 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -10,6 +10,7 @@ include/uapi/linux/fs.h
10include/uapi/linux/kcmp.h 10include/uapi/linux/kcmp.h
11include/uapi/linux/kvm.h 11include/uapi/linux/kvm.h
12include/uapi/linux/in.h 12include/uapi/linux/in.h
13include/uapi/linux/mount.h
13include/uapi/linux/perf_event.h 14include/uapi/linux/perf_event.h
14include/uapi/linux/prctl.h 15include/uapi/linux/prctl.h
15include/uapi/linux/sched.h 16include/uapi/linux/sched.h
@@ -49,7 +50,6 @@ arch/parisc/include/uapi/asm/errno.h
49arch/powerpc/include/uapi/asm/errno.h 50arch/powerpc/include/uapi/asm/errno.h
50arch/sparc/include/uapi/asm/errno.h 51arch/sparc/include/uapi/asm/errno.h
51arch/x86/include/uapi/asm/errno.h 52arch/x86/include/uapi/asm/errno.h
52arch/powerpc/include/uapi/asm/unistd.h
53include/asm-generic/bitops/arch_hweight.h 53include/asm-generic/bitops/arch_hweight.h
54include/asm-generic/bitops/const_hweight.h 54include/asm-generic/bitops/const_hweight.h
55include/asm-generic/bitops/__fls.h 55include/asm-generic/bitops/__fls.h
diff --git a/tools/perf/perf-read-vdso.c b/tools/perf/perf-read-vdso.c
index 8c0ca0cc428f..aaa5210ea84a 100644
--- a/tools/perf/perf-read-vdso.c
+++ b/tools/perf/perf-read-vdso.c
@@ -5,17 +5,17 @@
5#define VDSO__MAP_NAME "[vdso]" 5#define VDSO__MAP_NAME "[vdso]"
6 6
7/* 7/*
8 * Include definition of find_vdso_map() also used in util/vdso.c for 8 * Include definition of find_map() also used in util/vdso.c for
9 * building perf. 9 * building perf.
10 */ 10 */
11#include "util/find-vdso-map.c" 11#include "util/find-map.c"
12 12
13int main(void) 13int main(void)
14{ 14{
15 void *start, *end; 15 void *start, *end;
16 size_t size, written; 16 size_t size, written;
17 17
18 if (find_vdso_map(&start, &end)) 18 if (find_map(&start, &end, VDSO__MAP_NAME))
19 return 1; 19 return 1;
20 20
21 size = end - start; 21 size = end - start;
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index 44090a9a19f3..e952127e4fb0 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -1,6 +1,8 @@
1#! /usr/bin/python 1#! /usr/bin/python
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4from __future__ import print_function
5
4import os 6import os
5import sys 7import sys
6import glob 8import glob
@@ -8,7 +10,11 @@ import optparse
8import tempfile 10import tempfile
9import logging 11import logging
10import shutil 12import shutil
11import ConfigParser 13
14try:
15 import configparser
16except ImportError:
17 import ConfigParser as configparser
12 18
13def data_equal(a, b): 19def data_equal(a, b):
14 # Allow multiple values in assignment separated by '|' 20 # Allow multiple values in assignment separated by '|'
@@ -100,20 +106,20 @@ class Event(dict):
100 def equal(self, other): 106 def equal(self, other):
101 for t in Event.terms: 107 for t in Event.terms:
102 log.debug(" [%s] %s %s" % (t, self[t], other[t])); 108 log.debug(" [%s] %s %s" % (t, self[t], other[t]));
103 if not self.has_key(t) or not other.has_key(t): 109 if t not in self or t not in other:
104 return False 110 return False
105 if not data_equal(self[t], other[t]): 111 if not data_equal(self[t], other[t]):
106 return False 112 return False
107 return True 113 return True
108 114
109 def optional(self): 115 def optional(self):
110 if self.has_key('optional') and self['optional'] == '1': 116 if 'optional' in self and self['optional'] == '1':
111 return True 117 return True
112 return False 118 return False
113 119
114 def diff(self, other): 120 def diff(self, other):
115 for t in Event.terms: 121 for t in Event.terms:
116 if not self.has_key(t) or not other.has_key(t): 122 if t not in self or t not in other:
117 continue 123 continue
118 if not data_equal(self[t], other[t]): 124 if not data_equal(self[t], other[t]):
119 log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) 125 log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
@@ -134,7 +140,7 @@ class Event(dict):
134# - expected values assignments 140# - expected values assignments
135class Test(object): 141class Test(object):
136 def __init__(self, path, options): 142 def __init__(self, path, options):
137 parser = ConfigParser.SafeConfigParser() 143 parser = configparser.SafeConfigParser()
138 parser.read(path) 144 parser.read(path)
139 145
140 log.warning("running '%s'" % path) 146 log.warning("running '%s'" % path)
@@ -193,7 +199,7 @@ class Test(object):
193 return True 199 return True
194 200
195 def load_events(self, path, events): 201 def load_events(self, path, events):
196 parser_event = ConfigParser.SafeConfigParser() 202 parser_event = configparser.SafeConfigParser()
197 parser_event.read(path) 203 parser_event.read(path)
198 204
199 # The event record section header contains 'event' word, 205 # The event record section header contains 'event' word,
@@ -207,7 +213,7 @@ class Test(object):
207 # Read parent event if there's any 213 # Read parent event if there's any
208 if (':' in section): 214 if (':' in section):
209 base = section[section.index(':') + 1:] 215 base = section[section.index(':') + 1:]
210 parser_base = ConfigParser.SafeConfigParser() 216 parser_base = configparser.SafeConfigParser()
211 parser_base.read(self.test_dir + '/' + base) 217 parser_base.read(self.test_dir + '/' + base)
212 base_items = parser_base.items('event') 218 base_items = parser_base.items('event')
213 219
@@ -322,9 +328,9 @@ def run_tests(options):
322 for f in glob.glob(options.test_dir + '/' + options.test): 328 for f in glob.glob(options.test_dir + '/' + options.test):
323 try: 329 try:
324 Test(f, options).run() 330 Test(f, options).run()
325 except Unsup, obj: 331 except Unsup as obj:
326 log.warning("unsupp %s" % obj.getMsg()) 332 log.warning("unsupp %s" % obj.getMsg())
327 except Notest, obj: 333 except Notest as obj:
328 log.warning("skipped %s" % obj.getMsg()) 334 log.warning("skipped %s" % obj.getMsg())
329 335
330def setup_log(verbose): 336def setup_log(verbose):
@@ -363,7 +369,7 @@ def main():
363 parser.add_option("-p", "--perf", 369 parser.add_option("-p", "--perf",
364 action="store", type="string", dest="perf") 370 action="store", type="string", dest="perf")
365 parser.add_option("-v", "--verbose", 371 parser.add_option("-v", "--verbose",
366 action="count", dest="verbose") 372 default=0, action="count", dest="verbose")
367 373
368 options, args = parser.parse_args() 374 options, args = parser.parse_args()
369 if args: 375 if args:
@@ -373,7 +379,7 @@ def main():
373 setup_log(options.verbose) 379 setup_log(options.verbose)
374 380
375 if not options.test_dir: 381 if not options.test_dir:
376 print 'FAILED no -d option specified' 382 print('FAILED no -d option specified')
377 sys.exit(-1) 383 sys.exit(-1)
378 384
379 if not options.test: 385 if not options.test:
@@ -382,8 +388,8 @@ def main():
382 try: 388 try:
383 run_tests(options) 389 run_tests(options)
384 390
385 except Fail, obj: 391 except Fail as obj:
386 print "FAILED %s" % obj.getMsg(); 392 print("FAILED %s" % obj.getMsg())
387 sys.exit(-1) 393 sys.exit(-1)
388 394
389 sys.exit(0) 395 sys.exit(0)
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 5f8501c68da4..5cbba70bcdd0 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
17 return -1; 17 return -1;
18 } 18 }
19 19
20 is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED); 20 is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED);
21 if (should_be_signed && !is_signed) { 21 if (should_be_signed && !is_signed) {
22 pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", 22 pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
23 evsel->name, name, is_signed, should_be_signed); 23 evsel->name, name, is_signed, should_be_signed);
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 1c16e56cd93e..7cb99b433888 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -13,7 +13,8 @@ add_probe_vfs_getname() {
13 local verbose=$1 13 local verbose=$1
14 if [ $had_vfs_getname -eq 1 ] ; then 14 if [ $had_vfs_getname -eq 1 ] ; then
15 line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') 15 line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
16 perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string" 16 perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
17 perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
17 fi 18 fi
18} 19}
19 20
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index b82f55fcc294..399f18ca71a3 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -119,4 +119,9 @@ int test__arch_unwind_sample(struct perf_sample *sample,
119 struct thread *thread); 119 struct thread *thread);
120#endif 120#endif
121#endif 121#endif
122
123#if defined(__arm__)
124int test__vectors_page(struct test *test, int subtest);
125#endif
126
122#endif /* TESTS_H */ 127#endif /* TESTS_H */
diff --git a/tools/perf/trace/beauty/mount_flags.sh b/tools/perf/trace/beauty/mount_flags.sh
index 45547573a1db..847850b2ef6c 100755
--- a/tools/perf/trace/beauty/mount_flags.sh
+++ b/tools/perf/trace/beauty/mount_flags.sh
@@ -5,11 +5,11 @@
5 5
6printf "static const char *mount_flags[] = {\n" 6printf "static const char *mount_flags[] = {\n"
7regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*' 7regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
8egrep $regex ${header_dir}/fs.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \ 8egrep $regex ${header_dir}/mount.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \
9 sed -r "s/$regex/\2 \2 \1/g" | sort -n | \ 9 sed -r "s/$regex/\2 \2 \1/g" | sort -n | \
10 xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n" 10 xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n"
11regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*' 11regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*'
12egrep $regex ${header_dir}/fs.h | \ 12egrep $regex ${header_dir}/mount.h | \
13 sed -r "s/$regex/\2 \1/g" | \ 13 sed -r "s/$regex/\2 \1/g" | \
14 xargs printf "\t[%s + 1] = \"%s\",\n" 14 xargs printf "\t[%s + 1] = \"%s\",\n"
15printf "};\n" 15printf "};\n"
diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh
index d32f8f1124af..3109d7b05e11 100755
--- a/tools/perf/trace/beauty/prctl_option.sh
+++ b/tools/perf/trace/beauty/prctl_option.sh
@@ -4,7 +4,7 @@
4[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ 4[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
5 5
6printf "static const char *prctl_options[] = {\n" 6printf "static const char *prctl_options[] = {\n"
7regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*' 7regex='^#define[[:space:]]+PR_(\w+)[[:space:]]*([[:xdigit:]]+).*'
8egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \ 8egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
9 sed -r "s/$regex/\2 \1/g" | \ 9 sed -r "s/$regex/\2 \1/g" | \
10 sort -n | xargs printf "\t[%s] = \"%s\",\n" 10 sort -n | xargs printf "\t[%s] = \"%s\",\n"
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 1d00e5ec7906..82e16bf84466 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -224,20 +224,24 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
224 return ret; 224 return ret;
225} 225}
226 226
227static int disasm__cmp(struct annotation_line *a, struct annotation_line *b) 227static double disasm__cmp(struct annotation_line *a, struct annotation_line *b,
228 int percent_type)
228{ 229{
229 int i; 230 int i;
230 231
231 for (i = 0; i < a->data_nr; i++) { 232 for (i = 0; i < a->data_nr; i++) {
232 if (a->data[i].percent == b->data[i].percent) 233 if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type])
233 continue; 234 continue;
234 return a->data[i].percent < b->data[i].percent; 235 return a->data[i].percent[percent_type] -
236 b->data[i].percent[percent_type];
235 } 237 }
236 return 0; 238 return 0;
237} 239}
238 240
239static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al) 241static void disasm_rb_tree__insert(struct annotate_browser *browser,
242 struct annotation_line *al)
240{ 243{
244 struct rb_root *root = &browser->entries;
241 struct rb_node **p = &root->rb_node; 245 struct rb_node **p = &root->rb_node;
242 struct rb_node *parent = NULL; 246 struct rb_node *parent = NULL;
243 struct annotation_line *l; 247 struct annotation_line *l;
@@ -246,7 +250,7 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line
246 parent = *p; 250 parent = *p;
247 l = rb_entry(parent, struct annotation_line, rb_node); 251 l = rb_entry(parent, struct annotation_line, rb_node);
248 252
249 if (disasm__cmp(al, l)) 253 if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
250 p = &(*p)->rb_left; 254 p = &(*p)->rb_left;
251 else 255 else
252 p = &(*p)->rb_right; 256 p = &(*p)->rb_right;
@@ -329,7 +333,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
329 RB_CLEAR_NODE(&pos->al.rb_node); 333 RB_CLEAR_NODE(&pos->al.rb_node);
330 continue; 334 continue;
331 } 335 }
332 disasm_rb_tree__insert(&browser->entries, &pos->al); 336 disasm_rb_tree__insert(browser, &pos->al);
333 } 337 }
334 pthread_mutex_unlock(&notes->lock); 338 pthread_mutex_unlock(&notes->lock);
335 339
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index ac9805e0bc76..70de8f6b3aee 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1723,15 +1723,14 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
1723 err = asprintf(&command, 1723 err = asprintf(&command,
1724 "%s %s%s --start-address=0x%016" PRIx64 1724 "%s %s%s --start-address=0x%016" PRIx64
1725 " --stop-address=0x%016" PRIx64 1725 " --stop-address=0x%016" PRIx64
1726 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand", 1726 " -l -d %s %s -C \"$1\" 2>/dev/null|grep -v \"$1:\"|expand",
1727 opts->objdump_path ?: "objdump", 1727 opts->objdump_path ?: "objdump",
1728 opts->disassembler_style ? "-M " : "", 1728 opts->disassembler_style ? "-M " : "",
1729 opts->disassembler_style ?: "", 1729 opts->disassembler_style ?: "",
1730 map__rip_2objdump(map, sym->start), 1730 map__rip_2objdump(map, sym->start),
1731 map__rip_2objdump(map, sym->end), 1731 map__rip_2objdump(map, sym->end),
1732 opts->show_asm_raw ? "" : "--no-show-raw", 1732 opts->show_asm_raw ? "" : "--no-show-raw",
1733 opts->annotate_src ? "-S" : "", 1733 opts->annotate_src ? "-S" : "");
1734 symfs_filename, symfs_filename);
1735 1734
1736 if (err < 0) { 1735 if (err < 0) {
1737 pr_err("Failure allocating memory for the command to run\n"); 1736 pr_err("Failure allocating memory for the command to run\n");
@@ -1756,7 +1755,8 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
1756 close(stdout_fd[0]); 1755 close(stdout_fd[0]);
1757 dup2(stdout_fd[1], 1); 1756 dup2(stdout_fd[1], 1);
1758 close(stdout_fd[1]); 1757 close(stdout_fd[1]);
1759 execl("/bin/sh", "sh", "-c", command, NULL); 1758 execl("/bin/sh", "sh", "-c", command, "--", symfs_filename,
1759 NULL);
1760 perror(command); 1760 perror(command);
1761 exit(-1); 1761 exit(-1);
1762 } 1762 }
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
index 89512504551b..39c0004f2886 100644
--- a/tools/perf/util/c++/clang.cpp
+++ b/tools/perf/util/c++/clang.cpp
@@ -160,7 +160,7 @@ getBPFObjectFromModule(llvm::Module *Module)
160 } 160 }
161 PM.run(*Module); 161 PM.run(*Module);
162 162
163 return std::move(Buffer); 163 return Buffer;
164} 164}
165 165
166} 166}
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 32ef7bdca1cf..dc2212e12184 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -766,6 +766,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
766 cnode->cycles_count += node->branch_flags.cycles; 766 cnode->cycles_count += node->branch_flags.cycles;
767 cnode->iter_count += node->nr_loop_iter; 767 cnode->iter_count += node->nr_loop_iter;
768 cnode->iter_cycles += node->iter_cycles; 768 cnode->iter_cycles += node->iter_cycles;
769 cnode->from_count++;
769 } 770 }
770 } 771 }
771 772
@@ -1345,10 +1346,10 @@ static int branch_to_str(char *bf, int bfsize,
1345static int branch_from_str(char *bf, int bfsize, 1346static int branch_from_str(char *bf, int bfsize,
1346 u64 branch_count, 1347 u64 branch_count,
1347 u64 cycles_count, u64 iter_count, 1348 u64 cycles_count, u64 iter_count,
1348 u64 iter_cycles) 1349 u64 iter_cycles, u64 from_count)
1349{ 1350{
1350 int printed = 0, i = 0; 1351 int printed = 0, i = 0;
1351 u64 cycles; 1352 u64 cycles, v = 0;
1352 1353
1353 cycles = cycles_count / branch_count; 1354 cycles = cycles_count / branch_count;
1354 if (cycles) { 1355 if (cycles) {
@@ -1357,14 +1358,16 @@ static int branch_from_str(char *bf, int bfsize,
1357 bf + printed, bfsize - printed); 1358 bf + printed, bfsize - printed);
1358 } 1359 }
1359 1360
1360 if (iter_count) { 1361 if (iter_count && from_count) {
1361 printed += count_pri64_printf(i++, "iter", 1362 v = iter_count / from_count;
1362 iter_count, 1363 if (v) {
1363 bf + printed, bfsize - printed); 1364 printed += count_pri64_printf(i++, "iter",
1365 v, bf + printed, bfsize - printed);
1364 1366
1365 printed += count_pri64_printf(i++, "avg_cycles", 1367 printed += count_pri64_printf(i++, "avg_cycles",
1366 iter_cycles / iter_count, 1368 iter_cycles / iter_count,
1367 bf + printed, bfsize - printed); 1369 bf + printed, bfsize - printed);
1370 }
1368 } 1371 }
1369 1372
1370 if (i) 1373 if (i)
@@ -1377,6 +1380,7 @@ static int counts_str_build(char *bf, int bfsize,
1377 u64 branch_count, u64 predicted_count, 1380 u64 branch_count, u64 predicted_count,
1378 u64 abort_count, u64 cycles_count, 1381 u64 abort_count, u64 cycles_count,
1379 u64 iter_count, u64 iter_cycles, 1382 u64 iter_count, u64 iter_cycles,
1383 u64 from_count,
1380 struct branch_type_stat *brtype_stat) 1384 struct branch_type_stat *brtype_stat)
1381{ 1385{
1382 int printed; 1386 int printed;
@@ -1389,7 +1393,8 @@ static int counts_str_build(char *bf, int bfsize,
1389 predicted_count, abort_count, brtype_stat); 1393 predicted_count, abort_count, brtype_stat);
1390 } else { 1394 } else {
1391 printed = branch_from_str(bf, bfsize, branch_count, 1395 printed = branch_from_str(bf, bfsize, branch_count,
1392 cycles_count, iter_count, iter_cycles); 1396 cycles_count, iter_count, iter_cycles,
1397 from_count);
1393 } 1398 }
1394 1399
1395 if (!printed) 1400 if (!printed)
@@ -1402,13 +1407,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1402 u64 branch_count, u64 predicted_count, 1407 u64 branch_count, u64 predicted_count,
1403 u64 abort_count, u64 cycles_count, 1408 u64 abort_count, u64 cycles_count,
1404 u64 iter_count, u64 iter_cycles, 1409 u64 iter_count, u64 iter_cycles,
1410 u64 from_count,
1405 struct branch_type_stat *brtype_stat) 1411 struct branch_type_stat *brtype_stat)
1406{ 1412{
1407 char str[256]; 1413 char str[256];
1408 1414
1409 counts_str_build(str, sizeof(str), branch_count, 1415 counts_str_build(str, sizeof(str), branch_count,
1410 predicted_count, abort_count, cycles_count, 1416 predicted_count, abort_count, cycles_count,
1411 iter_count, iter_cycles, brtype_stat); 1417 iter_count, iter_cycles, from_count, brtype_stat);
1412 1418
1413 if (fp) 1419 if (fp)
1414 return fprintf(fp, "%s", str); 1420 return fprintf(fp, "%s", str);
@@ -1422,6 +1428,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
1422 u64 branch_count, predicted_count; 1428 u64 branch_count, predicted_count;
1423 u64 abort_count, cycles_count; 1429 u64 abort_count, cycles_count;
1424 u64 iter_count, iter_cycles; 1430 u64 iter_count, iter_cycles;
1431 u64 from_count;
1425 1432
1426 branch_count = clist->branch_count; 1433 branch_count = clist->branch_count;
1427 predicted_count = clist->predicted_count; 1434 predicted_count = clist->predicted_count;
@@ -1429,11 +1436,12 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
1429 cycles_count = clist->cycles_count; 1436 cycles_count = clist->cycles_count;
1430 iter_count = clist->iter_count; 1437 iter_count = clist->iter_count;
1431 iter_cycles = clist->iter_cycles; 1438 iter_cycles = clist->iter_cycles;
1439 from_count = clist->from_count;
1432 1440
1433 return callchain_counts_printf(fp, bf, bfsize, branch_count, 1441 return callchain_counts_printf(fp, bf, bfsize, branch_count,
1434 predicted_count, abort_count, 1442 predicted_count, abort_count,
1435 cycles_count, iter_count, iter_cycles, 1443 cycles_count, iter_count, iter_cycles,
1436 &clist->brtype_stat); 1444 from_count, &clist->brtype_stat);
1437} 1445}
1438 1446
1439static void free_callchain_node(struct callchain_node *node) 1447static void free_callchain_node(struct callchain_node *node)
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 154560b1eb65..99d38ac019b8 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -118,6 +118,7 @@ struct callchain_list {
118 bool has_children; 118 bool has_children;
119 }; 119 };
120 u64 branch_count; 120 u64 branch_count;
121 u64 from_count;
121 u64 predicted_count; 122 u64 predicted_count;
122 u64 abort_count; 123 u64 abort_count;
123 u64 cycles_count; 124 u64 cycles_count;
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 1ccbd3342069..383674f448fc 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
134 if (!cpu_list) 134 if (!cpu_list)
135 return cpu_map__read_all_cpu_map(); 135 return cpu_map__read_all_cpu_map();
136 136
137 if (!isdigit(*cpu_list)) 137 /*
138 * must handle the case of empty cpumap to cover
139 * TOPOLOGY header for NUMA nodes with no CPU
140 * ( e.g., because of CPU hotplug)
141 */
142 if (!isdigit(*cpu_list) && *cpu_list != '\0')
138 goto out; 143 goto out;
139 144
140 while (isdigit(*cpu_list)) { 145 while (isdigit(*cpu_list)) {
@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
181 186
182 if (nr_cpus > 0) 187 if (nr_cpus > 0)
183 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); 188 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
184 else 189 else if (*cpu_list != '\0')
185 cpus = cpu_map__default_new(); 190 cpus = cpu_map__default_new();
191 else
192 cpus = cpu_map__dummy_new();
186invalid: 193invalid:
187 free(tmp_cpus); 194 free(tmp_cpus);
188out: 195out:
diff --git a/tools/perf/util/find-vdso-map.c b/tools/perf/util/find-map.c
index d7823e3508fc..7b2300588ece 100644
--- a/tools/perf/util/find-vdso-map.c
+++ b/tools/perf/util/find-map.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2static int find_vdso_map(void **start, void **end) 2static int find_map(void **start, void **end, const char *name)
3{ 3{
4 FILE *maps; 4 FILE *maps;
5 char line[128]; 5 char line[128];
@@ -7,7 +7,7 @@ static int find_vdso_map(void **start, void **end)
7 7
8 maps = fopen("/proc/self/maps", "r"); 8 maps = fopen("/proc/self/maps", "r");
9 if (!maps) { 9 if (!maps) {
10 fprintf(stderr, "vdso: cannot open maps\n"); 10 fprintf(stderr, "cannot open maps\n");
11 return -1; 11 return -1;
12 } 12 }
13 13
@@ -21,8 +21,7 @@ static int find_vdso_map(void **start, void **end)
21 if (m < 0) 21 if (m < 0)
22 continue; 22 continue;
23 23
24 if (!strncmp(&line[m], VDSO__MAP_NAME, 24 if (!strncmp(&line[m], name, strlen(name)))
25 sizeof(VDSO__MAP_NAME) - 1))
26 found = 1; 25 found = 1;
27 } 26 }
28 27
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6fcb3bce0442..143f7057d581 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2005,7 +2005,7 @@ static void save_iterations(struct iterations *iter,
2005{ 2005{
2006 int i; 2006 int i;
2007 2007
2008 iter->nr_loop_iter = nr; 2008 iter->nr_loop_iter++;
2009 iter->cycles = 0; 2009 iter->cycles = 0;
2010 2010
2011 for (i = 0; i < nr; i++) 2011 for (i = 0; i < nr; i++)
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 93f74d8d3cdd..42c3e5a229d2 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -28,7 +28,7 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
28static char mem_loads_name[100]; 28static char mem_loads_name[100];
29static bool mem_loads_name__init; 29static bool mem_loads_name__init;
30 30
31char *perf_mem_events__name(int i) 31char * __weak perf_mem_events__name(int i)
32{ 32{
33 if (i == PERF_MEM_EVENTS__LOAD) { 33 if (i == PERF_MEM_EVENTS__LOAD) {
34 if (!mem_loads_name__init) { 34 if (!mem_loads_name__init) {
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 897589507d97..ea523d3b248f 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -391,8 +391,10 @@ void ordered_events__free(struct ordered_events *oe)
391 * Current buffer might not have all the events allocated 391 * Current buffer might not have all the events allocated
392 * yet, we need to free only allocated ones ... 392 * yet, we need to free only allocated ones ...
393 */ 393 */
394 list_del(&oe->buffer->list); 394 if (oe->buffer) {
395 ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); 395 list_del(&oe->buffer->list);
396 ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
397 }
396 398
397 /* ... and continue with the rest */ 399 /* ... and continue with the rest */
398 list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) { 400 list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 63f758c655d5..64d1f36dee99 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -17,6 +17,8 @@ if cc == "clang":
17 vars[var] = sub("-mcet", "", vars[var]) 17 vars[var] = sub("-mcet", "", vars[var])
18 if not clang_has_option("-fcf-protection"): 18 if not clang_has_option("-fcf-protection"):
19 vars[var] = sub("-fcf-protection", "", vars[var]) 19 vars[var] = sub("-fcf-protection", "", vars[var])
20 if not clang_has_option("-fstack-clash-protection"):
21 vars[var] = sub("-fstack-clash-protection", "", vars[var])
20 22
21from distutils.core import setup, Extension 23from distutils.core import setup, Extension
22 24
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 9005fbe0780e..23092fd6451d 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -109,7 +109,6 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
109 return ret; 109 return ret;
110 } 110 }
111 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved); 111 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
112 va_end(ap_saved);
113 if (len > strbuf_avail(sb)) { 112 if (len > strbuf_avail(sb)) {
114 pr_debug("this should not happen, your vsnprintf is broken"); 113 pr_debug("this should not happen, your vsnprintf is broken");
115 va_end(ap_saved); 114 va_end(ap_saved);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 66a84d5846c8..dca7dfae69ad 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -19,6 +19,20 @@
19#define EM_AARCH64 183 /* ARM 64 bit */ 19#define EM_AARCH64 183 /* ARM 64 bit */
20#endif 20#endif
21 21
22#ifndef ELF32_ST_VISIBILITY
23#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
24#endif
25
26/* For ELF64 the definitions are the same. */
27#ifndef ELF64_ST_VISIBILITY
28#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
29#endif
30
31/* How to extract information held in the st_other field. */
32#ifndef GELF_ST_VISIBILITY
33#define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
34#endif
35
22typedef Elf64_Nhdr GElf_Nhdr; 36typedef Elf64_Nhdr GElf_Nhdr;
23 37
24#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT 38#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
@@ -87,6 +101,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
87 return GELF_ST_TYPE(sym->st_info); 101 return GELF_ST_TYPE(sym->st_info);
88} 102}
89 103
104static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
105{
106 return GELF_ST_VISIBILITY(sym->st_other);
107}
108
90#ifndef STT_GNU_IFUNC 109#ifndef STT_GNU_IFUNC
91#define STT_GNU_IFUNC 10 110#define STT_GNU_IFUNC 10
92#endif 111#endif
@@ -111,7 +130,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
111 return elf_sym__type(sym) == STT_NOTYPE && 130 return elf_sym__type(sym) == STT_NOTYPE &&
112 sym->st_name != 0 && 131 sym->st_name != 0 &&
113 sym->st_shndx != SHN_UNDEF && 132 sym->st_shndx != SHN_UNDEF &&
114 sym->st_shndx != SHN_ABS; 133 sym->st_shndx != SHN_ABS &&
134 elf_sym__visibility(sym) != STV_HIDDEN &&
135 elf_sym__visibility(sym) != STV_INTERNAL;
115} 136}
116 137
117static bool elf_sym__filter(GElf_Sym *sym) 138static bool elf_sym__filter(GElf_Sym *sym)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 01f2c7385e38..48efad6d0f90 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -614,6 +614,7 @@ out:
614static bool symbol__is_idle(const char *name) 614static bool symbol__is_idle(const char *name)
615{ 615{
616 const char * const idle_symbols[] = { 616 const char * const idle_symbols[] = {
617 "arch_cpu_idle",
617 "cpu_idle", 618 "cpu_idle",
618 "cpu_startup_entry", 619 "cpu_startup_entry",
619 "intel_idle", 620 "intel_idle",
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 741af209b19d..3702cba11d7d 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -18,10 +18,10 @@
18#include "debug.h" 18#include "debug.h"
19 19
20/* 20/*
21 * Include definition of find_vdso_map() also used in perf-read-vdso.c for 21 * Include definition of find_map() also used in perf-read-vdso.c for
22 * building perf-read-vdso32 and perf-read-vdsox32. 22 * building perf-read-vdso32 and perf-read-vdsox32.
23 */ 23 */
24#include "find-vdso-map.c" 24#include "find-map.c"
25 25
26#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX" 26#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX"
27 27
@@ -76,7 +76,7 @@ static char *get_file(struct vdso_file *vdso_file)
76 if (vdso_file->found) 76 if (vdso_file->found)
77 return vdso_file->temp_file_name; 77 return vdso_file->temp_file_name;
78 78
79 if (vdso_file->error || find_vdso_map(&start, &end)) 79 if (vdso_file->error || find_map(&start, &end, VDSO__MAP_NAME))
80 return NULL; 80 return NULL;
81 81
82 size = end - start; 82 size = end - start;
diff --git a/tools/testing/nvdimm/dimm_devs.c b/tools/testing/nvdimm/dimm_devs.c
index e75238404555..2d4baf57822f 100644
--- a/tools/testing/nvdimm/dimm_devs.c
+++ b/tools/testing/nvdimm/dimm_devs.c
@@ -18,8 +18,8 @@ ssize_t security_show(struct device *dev,
18 * For the test version we need to poll the "hardware" in order 18 * For the test version we need to poll the "hardware" in order
19 * to get the updated status for unlock testing. 19 * to get the updated status for unlock testing.
20 */ 20 */
21 nvdimm->sec.state = nvdimm_security_state(nvdimm, false); 21 nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
22 nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, true); 22 nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER);
23 23
24 switch (nvdimm->sec.state) { 24 switch (nvdimm->sec.state) {
25 case NVDIMM_SECURITY_DISABLED: 25 case NVDIMM_SECURITY_DISABLED:
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 1a2bd15c5b6e..400ee81a3043 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -10,6 +10,7 @@ TARGETS += drivers/dma-buf
10TARGETS += efivarfs 10TARGETS += efivarfs
11TARGETS += exec 11TARGETS += exec
12TARGETS += filesystems 12TARGETS += filesystems
13TARGETS += filesystems/binderfs
13TARGETS += firmware 14TARGETS += firmware
14TARGETS += ftrace 15TARGETS += ftrace
15TARGETS += futex 16TARGETS += futex
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 4a9785043a39..dd093bd91aa9 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -28,3 +28,4 @@ flow_dissector_load
28test_netcnt 28test_netcnt
29test_section_names 29test_section_names
30test_tcpnotify_user 30test_tcpnotify_user
31test_libbpf
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 73aa6d8f4a2f..41ab7a3668b3 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -55,7 +55,10 @@ TEST_PROGS := test_kmod.sh \
55 test_flow_dissector.sh \ 55 test_flow_dissector.sh \
56 test_xdp_vlan.sh 56 test_xdp_vlan.sh
57 57
58TEST_PROGS_EXTENDED := with_addr.sh 58TEST_PROGS_EXTENDED := with_addr.sh \
59 with_tunnels.sh \
60 tcp_client.py \
61 tcp_server.py
59 62
60# Compile but not part of 'make run_tests' 63# Compile but not part of 'make run_tests'
61TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ 64TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 315a44fa32af..84fd6f1bf33e 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
13 unsigned int start, end, possible_cpus = 0; 13 unsigned int start, end, possible_cpus = 0;
14 char buff[128]; 14 char buff[128];
15 FILE *fp; 15 FILE *fp;
16 int n; 16 int len, n, i, j = 0;
17 17
18 fp = fopen(fcpu, "r"); 18 fp = fopen(fcpu, "r");
19 if (!fp) { 19 if (!fp) {
@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
21 exit(1); 21 exit(1);
22 } 22 }
23 23
24 while (fgets(buff, sizeof(buff), fp)) { 24 if (!fgets(buff, sizeof(buff), fp)) {
25 n = sscanf(buff, "%u-%u", &start, &end); 25 printf("Failed to read %s!\n", fcpu);
26 if (n == 0) { 26 exit(1);
27 printf("Failed to retrieve # possible CPUs!\n"); 27 }
28 exit(1); 28
29 } else if (n == 1) { 29 len = strlen(buff);
30 end = start; 30 for (i = 0; i <= len; i++) {
31 if (buff[i] == ',' || buff[i] == '\0') {
32 buff[i] = '\0';
33 n = sscanf(&buff[j], "%u-%u", &start, &end);
34 if (n <= 0) {
35 printf("Failed to retrieve # possible CPUs!\n");
36 exit(1);
37 } else if (n == 1) {
38 end = start;
39 }
40 possible_cpus += end - start + 1;
41 j = i + 1;
31 } 42 }
32 possible_cpus = start == 0 ? end + 1 : 0;
33 break;
34 } 43 }
44
35 fclose(fp); 45 fclose(fp);
36 46
37 return possible_cpus; 47 return possible_cpus;
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index cf16948aad4a..6692a40a6979 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -155,7 +155,7 @@ void cleanup_cgroup_environment(void)
155 * This function creates a cgroup under the top level workdir and returns the 155 * This function creates a cgroup under the top level workdir and returns the
156 * file descriptor. It is idempotent. 156 * file descriptor. It is idempotent.
157 * 157 *
158 * On success, it returns the file descriptor. On failure it returns 0. 158 * On success, it returns the file descriptor. On failure it returns -1.
159 * If there is a failure, it prints the error to stderr. 159 * If there is a failure, it prints the error to stderr.
160 */ 160 */
161int create_and_get_cgroup(const char *path) 161int create_and_get_cgroup(const char *path)
@@ -166,13 +166,13 @@ int create_and_get_cgroup(const char *path)
166 format_cgroup_path(cgroup_path, path); 166 format_cgroup_path(cgroup_path, path);
167 if (mkdir(cgroup_path, 0777) && errno != EEXIST) { 167 if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
168 log_err("mkdiring cgroup %s .. %s", path, cgroup_path); 168 log_err("mkdiring cgroup %s .. %s", path, cgroup_path);
169 return 0; 169 return -1;
170 } 170 }
171 171
172 fd = open(cgroup_path, O_RDONLY); 172 fd = open(cgroup_path, O_RDONLY);
173 if (fd < 0) { 173 if (fd < 0) {
174 log_err("Opening Cgroup"); 174 log_err("Opening Cgroup");
175 return 0; 175 return -1;
176 } 176 }
177 177
178 return fd; 178 return fd;
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 8bcd38010582..91420fa83b08 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -1881,13 +1881,12 @@ static struct btf_raw_test raw_tests[] = {
1881}, 1881},
1882 1882
1883{ 1883{
1884 .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)", 1884 .descr = "func proto (TYPEDEF=>FUNC_PROTO)",
1885 .raw_types = { 1885 .raw_types = {
1886 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 1886 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
1887 BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ 1887 BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
1888 BTF_CONST_ENC(4), /* [3] */ 1888 BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
1889 BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */ 1889 BTF_FUNC_PROTO_ENC(0, 2), /* [4] */
1890 BTF_FUNC_PROTO_ENC(0, 2), /* [5] */
1891 BTF_FUNC_PROTO_ARG_ENC(0, 1), 1890 BTF_FUNC_PROTO_ARG_ENC(0, 1),
1892 BTF_FUNC_PROTO_ARG_ENC(0, 2), 1891 BTF_FUNC_PROTO_ARG_ENC(0, 2),
1893 BTF_END_RAW, 1892 BTF_END_RAW,
@@ -1901,8 +1900,6 @@ static struct btf_raw_test raw_tests[] = {
1901 .key_type_id = 1, 1900 .key_type_id = 1,
1902 .value_type_id = 1, 1901 .value_type_id = 1,
1903 .max_entries = 4, 1902 .max_entries = 4,
1904 .btf_load_err = true,
1905 .err_str = "Invalid type_id",
1906}, 1903},
1907 1904
1908{ 1905{
@@ -3526,6 +3523,8 @@ struct pprint_mapv {
3526 ENUM_TWO, 3523 ENUM_TWO,
3527 ENUM_THREE, 3524 ENUM_THREE,
3528 } aenum; 3525 } aenum;
3526 uint32_t ui32b;
3527 uint32_t bits2c:2;
3529}; 3528};
3530 3529
3531static struct btf_raw_test pprint_test_template[] = { 3530static struct btf_raw_test pprint_test_template[] = {
@@ -3568,7 +3567,7 @@ static struct btf_raw_test pprint_test_template[] = {
3568 BTF_ENUM_ENC(NAME_TBD, 2), 3567 BTF_ENUM_ENC(NAME_TBD, 2),
3569 BTF_ENUM_ENC(NAME_TBD, 3), 3568 BTF_ENUM_ENC(NAME_TBD, 3),
3570 /* struct pprint_mapv */ /* [16] */ 3569 /* struct pprint_mapv */ /* [16] */
3571 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32), 3570 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40),
3572 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ 3571 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */
3573 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ 3572 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */
3574 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ 3573 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */
@@ -3577,9 +3576,11 @@ static struct btf_raw_test pprint_test_template[] = {
3577 BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */ 3576 BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */
3578 BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */ 3577 BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */
3579 BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ 3578 BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */
3579 BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */
3580 BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */
3580 BTF_END_RAW, 3581 BTF_END_RAW,
3581 }, 3582 },
3582 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), 3583 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
3583 .key_size = sizeof(unsigned int), 3584 .key_size = sizeof(unsigned int),
3584 .value_size = sizeof(struct pprint_mapv), 3585 .value_size = sizeof(struct pprint_mapv),
3585 .key_type_id = 3, /* unsigned int */ 3586 .key_type_id = 3, /* unsigned int */
@@ -3628,7 +3629,7 @@ static struct btf_raw_test pprint_test_template[] = {
3628 BTF_ENUM_ENC(NAME_TBD, 2), 3629 BTF_ENUM_ENC(NAME_TBD, 2),
3629 BTF_ENUM_ENC(NAME_TBD, 3), 3630 BTF_ENUM_ENC(NAME_TBD, 3),
3630 /* struct pprint_mapv */ /* [16] */ 3631 /* struct pprint_mapv */ /* [16] */
3631 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), 3632 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
3632 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ 3633 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
3633 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ 3634 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
3634 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ 3635 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3637,9 +3638,11 @@ static struct btf_raw_test pprint_test_template[] = {
3637 BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */ 3638 BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */
3638 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ 3639 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
3639 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ 3640 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
3641 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
3642 BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
3640 BTF_END_RAW, 3643 BTF_END_RAW,
3641 }, 3644 },
3642 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), 3645 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
3643 .key_size = sizeof(unsigned int), 3646 .key_size = sizeof(unsigned int),
3644 .value_size = sizeof(struct pprint_mapv), 3647 .value_size = sizeof(struct pprint_mapv),
3645 .key_type_id = 3, /* unsigned int */ 3648 .key_type_id = 3, /* unsigned int */
@@ -3690,7 +3693,7 @@ static struct btf_raw_test pprint_test_template[] = {
3690 BTF_ENUM_ENC(NAME_TBD, 2), 3693 BTF_ENUM_ENC(NAME_TBD, 2),
3691 BTF_ENUM_ENC(NAME_TBD, 3), 3694 BTF_ENUM_ENC(NAME_TBD, 3),
3692 /* struct pprint_mapv */ /* [16] */ 3695 /* struct pprint_mapv */ /* [16] */
3693 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), 3696 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
3694 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ 3697 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
3695 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ 3698 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
3696 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ 3699 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3699,13 +3702,15 @@ static struct btf_raw_test pprint_test_template[] = {
3699 BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */ 3702 BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */
3700 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ 3703 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
3701 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ 3704 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
3705 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
3706 BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
3702 /* typedef unsigned int ___int */ /* [17] */ 3707 /* typedef unsigned int ___int */ /* [17] */
3703 BTF_TYPEDEF_ENC(NAME_TBD, 18), 3708 BTF_TYPEDEF_ENC(NAME_TBD, 18),
3704 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */ 3709 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */
3705 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */ 3710 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */
3706 BTF_END_RAW, 3711 BTF_END_RAW,
3707 }, 3712 },
3708 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"), 3713 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"),
3709 .key_size = sizeof(unsigned int), 3714 .key_size = sizeof(unsigned int),
3710 .value_size = sizeof(struct pprint_mapv), 3715 .value_size = sizeof(struct pprint_mapv),
3711 .key_type_id = 3, /* unsigned int */ 3716 .key_type_id = 3, /* unsigned int */
@@ -3793,6 +3798,8 @@ static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i,
3793 v->unused_bits2b = 3; 3798 v->unused_bits2b = 3;
3794 v->ui64 = i; 3799 v->ui64 = i;
3795 v->aenum = i & 0x03; 3800 v->aenum = i & 0x03;
3801 v->ui32b = 4;
3802 v->bits2c = 1;
3796 v = (void *)v + rounded_value_size; 3803 v = (void *)v + rounded_value_size;
3797 } 3804 }
3798} 3805}
@@ -3955,7 +3962,8 @@ static int do_test_pprint(int test_num)
3955 3962
3956 nexpected_line = snprintf(expected_line, sizeof(expected_line), 3963 nexpected_line = snprintf(expected_line, sizeof(expected_line),
3957 "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," 3964 "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
3958 "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", 3965 "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
3966 "%u,0x%x}\n",
3959 percpu_map ? "\tcpu" : "", 3967 percpu_map ? "\tcpu" : "",
3960 percpu_map ? cpu : next_key, 3968 percpu_map ? cpu : next_key,
3961 cmapv->ui32, cmapv->si32, 3969 cmapv->ui32, cmapv->si32,
@@ -3967,7 +3975,9 @@ static int do_test_pprint(int test_num)
3967 cmapv->ui8a[2], cmapv->ui8a[3], 3975 cmapv->ui8a[2], cmapv->ui8a[3],
3968 cmapv->ui8a[4], cmapv->ui8a[5], 3976 cmapv->ui8a[4], cmapv->ui8a[5],
3969 cmapv->ui8a[6], cmapv->ui8a[7], 3977 cmapv->ui8a[6], cmapv->ui8a[7],
3970 pprint_enum_str[cmapv->aenum]); 3978 pprint_enum_str[cmapv->aenum],
3979 cmapv->ui32b,
3980 cmapv->bits2c);
3971 3981
3972 err = check_line(expected_line, nexpected_line, 3982 err = check_line(expected_line, nexpected_line,
3973 sizeof(expected_line), line); 3983 sizeof(expected_line), line);
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index f44834155f25..2fc4625c1a15 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -81,7 +81,7 @@ int main(int argc, char **argv)
81 81
82 /* Create a cgroup, get fd, and join it */ 82 /* Create a cgroup, get fd, and join it */
83 cgroup_fd = create_and_get_cgroup(TEST_CGROUP); 83 cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
84 if (!cgroup_fd) { 84 if (cgroup_fd < 0) {
85 printf("Failed to create test cgroup\n"); 85 printf("Failed to create test cgroup\n");
86 goto err; 86 goto err;
87 } 87 }
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 9c8b50bac7e0..76e4993b7c16 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -43,7 +43,7 @@ int main(int argc, char **argv)
43 43
44 /* Create a cgroup, get fd, and join it */ 44 /* Create a cgroup, get fd, and join it */
45 cgroup_fd = create_and_get_cgroup(TEST_CGROUP); 45 cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
46 if (!cgroup_fd) { 46 if (cgroup_fd < 0) {
47 printf("Failed to create test cgroup\n"); 47 printf("Failed to create test cgroup\n");
48 goto err; 48 goto err;
49 } 49 }
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index 147e34cfceb7..02d7c871862a 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -474,6 +474,16 @@ static void test_lpm_delete(void)
474 assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 && 474 assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
475 errno == ENOENT); 475 errno == ENOENT);
476 476
477 key->prefixlen = 30; // unused prefix so far
478 inet_pton(AF_INET, "192.255.0.0", key->data);
479 assert(bpf_map_delete_elem(map_fd, key) == -1 &&
480 errno == ENOENT);
481
482 key->prefixlen = 16; // same prefix as the root node
483 inet_pton(AF_INET, "192.255.0.0", key->data);
484 assert(bpf_map_delete_elem(map_fd, key) == -1 &&
485 errno == ENOENT);
486
477 /* assert initial lookup */ 487 /* assert initial lookup */
478 key->prefixlen = 32; 488 key->prefixlen = 32;
479 inet_pton(AF_INET, "192.168.0.1", key->data); 489 inet_pton(AF_INET, "192.168.0.1", key->data);
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
index 44ed7f29f8ab..c1da5404454a 100644
--- a/tools/testing/selftests/bpf/test_netcnt.c
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -65,7 +65,7 @@ int main(int argc, char **argv)
65 65
66 /* Create a cgroup, get fd, and join it */ 66 /* Create a cgroup, get fd, and join it */
67 cgroup_fd = create_and_get_cgroup(TEST_CGROUP); 67 cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
68 if (!cgroup_fd) { 68 if (cgroup_fd < 0) {
69 printf("Failed to create test cgroup\n"); 69 printf("Failed to create test cgroup\n");
70 goto err; 70 goto err;
71 } 71 }
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 126fc624290d..25f0083a9b2e 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1188,7 +1188,9 @@ static void test_stacktrace_build_id(void)
1188 int i, j; 1188 int i, j;
1189 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; 1189 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1190 int build_id_matches = 0; 1190 int build_id_matches = 0;
1191 int retry = 1;
1191 1192
1193retry:
1192 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 1194 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
1193 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 1195 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
1194 goto out; 1196 goto out;
@@ -1301,6 +1303,19 @@ static void test_stacktrace_build_id(void)
1301 previous_key = key; 1303 previous_key = key;
1302 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1304 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1303 1305
1306 /* stack_map_get_build_id_offset() is racy and sometimes can return
1307 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
1308 * try it one more time.
1309 */
1310 if (build_id_matches < 1 && retry--) {
1311 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1312 close(pmu_fd);
1313 bpf_object__close(obj);
1314 printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
1315 __func__);
1316 goto retry;
1317 }
1318
1304 if (CHECK(build_id_matches < 1, "build id match", 1319 if (CHECK(build_id_matches < 1, "build id match",
1305 "Didn't find expected build ID from the map\n")) 1320 "Didn't find expected build ID from the map\n"))
1306 goto disable_pmu; 1321 goto disable_pmu;
@@ -1341,7 +1356,9 @@ static void test_stacktrace_build_id_nmi(void)
1341 int i, j; 1356 int i, j;
1342 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; 1357 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1343 int build_id_matches = 0; 1358 int build_id_matches = 0;
1359 int retry = 1;
1344 1360
1361retry:
1345 err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); 1362 err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
1346 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 1363 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
1347 return; 1364 return;
@@ -1436,6 +1453,19 @@ static void test_stacktrace_build_id_nmi(void)
1436 previous_key = key; 1453 previous_key = key;
1437 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1454 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1438 1455
1456 /* stack_map_get_build_id_offset() is racy and sometimes can return
1457 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
1458 * try it one more time.
1459 */
1460 if (build_id_matches < 1 && retry--) {
1461 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1462 close(pmu_fd);
1463 bpf_object__close(obj);
1464 printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
1465 __func__);
1466 goto retry;
1467 }
1468
1439 if (CHECK(build_id_matches < 1, "build id match", 1469 if (CHECK(build_id_matches < 1, "build id match",
1440 "Didn't find expected build ID from the map\n")) 1470 "Didn't find expected build ID from the map\n"))
1441 goto disable_pmu; 1471 goto disable_pmu;
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
index c121cc59f314..9220747c069d 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
@@ -164,7 +164,7 @@ int main(int argc, char **argv)
164 goto err; 164 goto err;
165 165
166 cgfd = create_and_get_cgroup(CGROUP_PATH); 166 cgfd = create_and_get_cgroup(CGROUP_PATH);
167 if (!cgfd) 167 if (cgfd < 0)
168 goto err; 168 goto err;
169 169
170 if (join_cgroup(CGROUP_PATH)) 170 if (join_cgroup(CGROUP_PATH))
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index b8ebe2f58074..561ffb6d6433 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -458,7 +458,7 @@ int main(int argc, char **argv)
458 goto err; 458 goto err;
459 459
460 cgfd = create_and_get_cgroup(CG_PATH); 460 cgfd = create_and_get_cgroup(CG_PATH);
461 if (!cgfd) 461 if (cgfd < 0)
462 goto err; 462 goto err;
463 463
464 if (join_cgroup(CG_PATH)) 464 if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index 73b7493d4120..3f110eaaf29c 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -44,6 +44,7 @@
44#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4" 44#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
45#define SRC6_IP "::1" 45#define SRC6_IP "::1"
46#define SRC6_REWRITE_IP "::6" 46#define SRC6_REWRITE_IP "::6"
47#define WILDCARD6_IP "::"
47#define SERV6_PORT 6060 48#define SERV6_PORT 6060
48#define SERV6_REWRITE_PORT 6666 49#define SERV6_REWRITE_PORT 6666
49 50
@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
85static int bind6_prog_load(const struct sock_addr_test *test); 86static int bind6_prog_load(const struct sock_addr_test *test);
86static int connect4_prog_load(const struct sock_addr_test *test); 87static int connect4_prog_load(const struct sock_addr_test *test);
87static int connect6_prog_load(const struct sock_addr_test *test); 88static int connect6_prog_load(const struct sock_addr_test *test);
89static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
88static int sendmsg_deny_prog_load(const struct sock_addr_test *test); 90static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
89static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test); 91static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
90static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test); 92static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
91static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test); 93static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
92static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test); 94static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
93static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test); 95static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
96static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
94 97
95static struct sock_addr_test tests[] = { 98static struct sock_addr_test tests[] = {
96 /* bind */ 99 /* bind */
@@ -463,6 +466,34 @@ static struct sock_addr_test tests[] = {
463 SYSCALL_ENOTSUPP, 466 SYSCALL_ENOTSUPP,
464 }, 467 },
465 { 468 {
469 "sendmsg6: set dst IP = [::] (BSD'ism)",
470 sendmsg6_rw_wildcard_prog_load,
471 BPF_CGROUP_UDP6_SENDMSG,
472 BPF_CGROUP_UDP6_SENDMSG,
473 AF_INET6,
474 SOCK_DGRAM,
475 SERV6_IP,
476 SERV6_PORT,
477 SERV6_REWRITE_IP,
478 SERV6_REWRITE_PORT,
479 SRC6_REWRITE_IP,
480 SUCCESS,
481 },
482 {
483 "sendmsg6: preserve dst IP = [::] (BSD'ism)",
484 sendmsg_allow_prog_load,
485 BPF_CGROUP_UDP6_SENDMSG,
486 BPF_CGROUP_UDP6_SENDMSG,
487 AF_INET6,
488 SOCK_DGRAM,
489 WILDCARD6_IP,
490 SERV6_PORT,
491 SERV6_REWRITE_IP,
492 SERV6_PORT,
493 SRC6_IP,
494 SUCCESS,
495 },
496 {
466 "sendmsg6: deny call", 497 "sendmsg6: deny call",
467 sendmsg_deny_prog_load, 498 sendmsg_deny_prog_load,
468 BPF_CGROUP_UDP6_SENDMSG, 499 BPF_CGROUP_UDP6_SENDMSG,
@@ -734,16 +765,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
734 return load_path(test, CONNECT6_PROG_PATH); 765 return load_path(test, CONNECT6_PROG_PATH);
735} 766}
736 767
737static int sendmsg_deny_prog_load(const struct sock_addr_test *test) 768static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
769 int32_t rc)
738{ 770{
739 struct bpf_insn insns[] = { 771 struct bpf_insn insns[] = {
740 /* return 0 */ 772 /* return rc */
741 BPF_MOV64_IMM(BPF_REG_0, 0), 773 BPF_MOV64_IMM(BPF_REG_0, rc),
742 BPF_EXIT_INSN(), 774 BPF_EXIT_INSN(),
743 }; 775 };
744 return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); 776 return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
745} 777}
746 778
779static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
780{
781 return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
782}
783
784static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
785{
786 return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
787}
788
747static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) 789static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
748{ 790{
749 struct sockaddr_in dst4_rw_addr; 791 struct sockaddr_in dst4_rw_addr;
@@ -864,6 +906,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
864 return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP); 906 return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
865} 907}
866 908
909static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
910{
911 return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
912}
913
867static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test) 914static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
868{ 915{
869 return load_path(test, SENDMSG6_PROG_PATH); 916 return load_path(test, SENDMSG6_PROG_PATH);
@@ -1395,7 +1442,7 @@ int main(int argc, char **argv)
1395 goto err; 1442 goto err;
1396 1443
1397 cgfd = create_and_get_cgroup(CG_PATH); 1444 cgfd = create_and_get_cgroup(CG_PATH);
1398 if (!cgfd) 1445 if (cgfd < 0)
1399 goto err; 1446 goto err;
1400 1447
1401 if (join_cgroup(CG_PATH)) 1448 if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index b6c2c605d8c0..fc7832ee566b 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -202,7 +202,7 @@ int main(int argc, char **argv)
202 goto err; 202 goto err;
203 203
204 cgfd = create_and_get_cgroup(CG_PATH); 204 cgfd = create_and_get_cgroup(CG_PATH);
205 if (!cgfd) 205 if (cgfd < 0)
206 goto err; 206 goto err;
207 207
208 if (join_cgroup(CG_PATH)) 208 if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
index e6eebda7d112..716b4e3be581 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -103,7 +103,7 @@ int main(int argc, char **argv)
103 goto err; 103 goto err;
104 104
105 cg_fd = create_and_get_cgroup(cg_path); 105 cg_fd = create_and_get_cgroup(cg_path);
106 if (!cg_fd) 106 if (cg_fd < 0)
107 goto err; 107 goto err;
108 108
109 if (join_cgroup(cg_path)) 109 if (join_cgroup(cg_path))
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index ff3c4522aed6..4e4353711a86 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -115,7 +115,7 @@ int main(int argc, char **argv)
115 goto err; 115 goto err;
116 116
117 cg_fd = create_and_get_cgroup(cg_path); 117 cg_fd = create_and_get_cgroup(cg_path);
118 if (!cg_fd) 118 if (cg_fd < 0)
119 goto err; 119 goto err;
120 120
121 if (join_cgroup(cg_path)) 121 if (join_cgroup(cg_path))
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 10d44446e801..2fd90d456892 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -6934,6 +6934,126 @@ static struct bpf_test tests[] = {
6934 .retval = 1, 6934 .retval = 1,
6935 }, 6935 },
6936 { 6936 {
6937 "map access: mixing value pointer and scalar, 1",
6938 .insns = {
6939 // load map value pointer into r0 and r2
6940 BPF_MOV64_IMM(BPF_REG_0, 1),
6941 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
6942 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
6943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
6944 BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
6945 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6946 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6947 BPF_EXIT_INSN(),
6948 // load some number from the map into r1
6949 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6950 // depending on r1, branch:
6951 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
6952 // branch A
6953 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6954 BPF_MOV64_IMM(BPF_REG_3, 0),
6955 BPF_JMP_A(2),
6956 // branch B
6957 BPF_MOV64_IMM(BPF_REG_2, 0),
6958 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
6959 // common instruction
6960 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6961 // depending on r1, branch:
6962 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
6963 // branch A
6964 BPF_JMP_A(4),
6965 // branch B
6966 BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
6967 // verifier follows fall-through
6968 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
6969 BPF_MOV64_IMM(BPF_REG_0, 0),
6970 BPF_EXIT_INSN(),
6971 // fake-dead code; targeted from branch A to
6972 // prevent dead code sanitization
6973 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6974 BPF_MOV64_IMM(BPF_REG_0, 0),
6975 BPF_EXIT_INSN(),
6976 },
6977 .fixup_map_array_48b = { 1 },
6978 .result = ACCEPT,
6979 .result_unpriv = REJECT,
6980 .errstr_unpriv = "R2 tried to add from different pointers or scalars",
6981 .retval = 0,
6982 },
6983 {
6984 "map access: mixing value pointer and scalar, 2",
6985 .insns = {
6986 // load map value pointer into r0 and r2
6987 BPF_MOV64_IMM(BPF_REG_0, 1),
6988 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
6989 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
6990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
6991 BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
6992 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6993 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6994 BPF_EXIT_INSN(),
6995 // load some number from the map into r1
6996 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6997 // depending on r1, branch:
6998 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
6999 // branch A
7000 BPF_MOV64_IMM(BPF_REG_2, 0),
7001 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
7002 BPF_JMP_A(2),
7003 // branch B
7004 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7005 BPF_MOV64_IMM(BPF_REG_3, 0),
7006 // common instruction
7007 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7008 // depending on r1, branch:
7009 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
7010 // branch A
7011 BPF_JMP_A(4),
7012 // branch B
7013 BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
7014 // verifier follows fall-through
7015 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
7016 BPF_MOV64_IMM(BPF_REG_0, 0),
7017 BPF_EXIT_INSN(),
7018 // fake-dead code; targeted from branch A to
7019 // prevent dead code sanitization
7020 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7021 BPF_MOV64_IMM(BPF_REG_0, 0),
7022 BPF_EXIT_INSN(),
7023 },
7024 .fixup_map_array_48b = { 1 },
7025 .result = ACCEPT,
7026 .result_unpriv = REJECT,
7027 .errstr_unpriv = "R2 tried to add from different maps or paths",
7028 .retval = 0,
7029 },
7030 {
7031 "sanitation: alu with different scalars",
7032 .insns = {
7033 BPF_MOV64_IMM(BPF_REG_0, 1),
7034 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
7035 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
7036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
7037 BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
7038 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7039 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7040 BPF_EXIT_INSN(),
7041 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7042 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
7043 BPF_MOV64_IMM(BPF_REG_2, 0),
7044 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
7045 BPF_JMP_A(2),
7046 BPF_MOV64_IMM(BPF_REG_2, 42),
7047 BPF_MOV64_IMM(BPF_REG_3, 0x100001),
7048 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7049 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7050 BPF_EXIT_INSN(),
7051 },
7052 .fixup_map_array_48b = { 1 },
7053 .result = ACCEPT,
7054 .retval = 0x100000,
7055 },
7056 {
6937 "map access: value_ptr += known scalar, upper oob arith, test 1", 7057 "map access: value_ptr += known scalar, upper oob arith, test 1",
6938 .insns = { 7058 .insns = {
6939 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 7059 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
index bab13dd025a6..0d26b5e3f966 100755
--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
@@ -37,6 +37,10 @@ prerequisite()
37 exit $ksft_skip 37 exit $ksft_skip
38 fi 38 fi
39 39
40 present_cpus=`cat $SYSFS/devices/system/cpu/present`
41 present_max=${present_cpus##*-}
42 echo "present_cpus = $present_cpus present_max = $present_max"
43
40 echo -e "\t Cpus in online state: $online_cpus" 44 echo -e "\t Cpus in online state: $online_cpus"
41 45
42 offline_cpus=`cat $SYSFS/devices/system/cpu/offline` 46 offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
@@ -151,6 +155,8 @@ online_cpus=0
151online_max=0 155online_max=0
152offline_cpus=0 156offline_cpus=0
153offline_max=0 157offline_max=0
158present_cpus=0
159present_max=0
154 160
155while getopts e:ahp: opt; do 161while getopts e:ahp: opt; do
156 case $opt in 162 case $opt in
@@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then
190 online_cpu_expect_success $online_max 196 online_cpu_expect_success $online_max
191 197
192 if [[ $offline_cpus -gt 0 ]]; then 198 if [[ $offline_cpus -gt 0 ]]; then
193 echo -e "\t offline to online to offline: cpu $offline_max" 199 echo -e "\t offline to online to offline: cpu $present_max"
194 online_cpu_expect_success $offline_max 200 online_cpu_expect_success $present_max
195 offline_cpu_expect_success $offline_max 201 offline_cpu_expect_success $present_max
202 online_cpu $present_max
196 fi 203 fi
197 exit 0 204 exit 0
198else 205else
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index 94fdbf215c14..c4cf6e6d800e 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -25,6 +25,7 @@ ALL_TESTS="
25 lag_unlink_slaves_test 25 lag_unlink_slaves_test
26 lag_dev_deletion_test 26 lag_dev_deletion_test
27 vlan_interface_uppers_test 27 vlan_interface_uppers_test
28 bridge_extern_learn_test
28 devlink_reload_test 29 devlink_reload_test
29" 30"
30NUM_NETIFS=2 31NUM_NETIFS=2
@@ -541,6 +542,25 @@ vlan_interface_uppers_test()
541 ip link del dev br0 542 ip link del dev br0
542} 543}
543 544
545bridge_extern_learn_test()
546{
547 # Test that externally learned entries added from user space are
548 # marked as offloaded
549 RET=0
550
551 ip link add name br0 type bridge
552 ip link set dev $swp1 master br0
553
554 bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn
555
556 bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload
557 check_err $? "fdb entry not marked as offloaded when should"
558
559 log_test "externally learned fdb entry"
560
561 ip link del dev br0
562}
563
544devlink_reload_test() 564devlink_reload_test()
545{ 565{
546 # Test that after executing all the above configuration tests, a 566 # Test that after executing all the above configuration tests, a
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
index dcf9f4e913e0..ae6146ec5afd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
@@ -847,6 +847,24 @@ sanitization_vlan_aware_test()
847 847
848 log_test "vlan-aware - failed enslavement to vlan-aware bridge" 848 log_test "vlan-aware - failed enslavement to vlan-aware bridge"
849 849
850 bridge vlan del vid 10 dev vxlan20
851 bridge vlan add vid 20 dev vxlan20 pvid untagged
852
853 # Test that offloading of an unsupported tunnel fails when it is
854 # triggered by addition of VLAN to a local port
855 RET=0
856
857 # TOS must be set to inherit
858 ip link set dev vxlan10 type vxlan tos 42
859
860 ip link set dev $swp1 master br0
861 bridge vlan add vid 10 dev $swp1 &> /dev/null
862 check_fail $?
863
864 log_test "vlan-aware - failed vlan addition to a local port"
865
866 ip link set dev vxlan10 type vxlan tos inherit
867
850 ip link del dev vxlan20 868 ip link del dev vxlan20
851 ip link del dev vxlan10 869 ip link del dev vxlan10
852 ip link del dev br0 870 ip link del dev br0
diff --git a/tools/testing/selftests/filesystems/binderfs/.gitignore b/tools/testing/selftests/filesystems/binderfs/.gitignore
new file mode 100644
index 000000000000..8a5d9bf63dd4
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/.gitignore
@@ -0,0 +1 @@
binderfs_test
diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
new file mode 100644
index 000000000000..58cb659b56b4
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/Makefile
@@ -0,0 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0
2
3CFLAGS += -I../../../../../usr/include/
4TEST_GEN_PROGS := binderfs_test
5
6include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
new file mode 100644
index 000000000000..8c2ed962e1c7
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -0,0 +1,275 @@
1// SPDX-License-Identifier: GPL-2.0
2
3#define _GNU_SOURCE
4#include <errno.h>
5#include <fcntl.h>
6#include <sched.h>
7#include <stdbool.h>
8#include <stdio.h>
9#include <stdlib.h>
10#include <string.h>
11#include <sys/ioctl.h>
12#include <sys/mount.h>
13#include <sys/stat.h>
14#include <sys/types.h>
15#include <unistd.h>
16#include <linux/android/binder.h>
17#include <linux/android/binderfs.h>
18#include "../../kselftest.h"
19
20static ssize_t write_nointr(int fd, const void *buf, size_t count)
21{
22 ssize_t ret;
23again:
24 ret = write(fd, buf, count);
25 if (ret < 0 && errno == EINTR)
26 goto again;
27
28 return ret;
29}
30
31static void write_to_file(const char *filename, const void *buf, size_t count,
32 int allowed_errno)
33{
34 int fd, saved_errno;
35 ssize_t ret;
36
37 fd = open(filename, O_WRONLY | O_CLOEXEC);
38 if (fd < 0)
39 ksft_exit_fail_msg("%s - Failed to open file %s\n",
40 strerror(errno), filename);
41
42 ret = write_nointr(fd, buf, count);
43 if (ret < 0) {
44 if (allowed_errno && (errno == allowed_errno)) {
45 close(fd);
46 return;
47 }
48
49 goto on_error;
50 }
51
52 if ((size_t)ret != count)
53 goto on_error;
54
55 close(fd);
56 return;
57
58on_error:
59 saved_errno = errno;
60 close(fd);
61 errno = saved_errno;
62
63 if (ret < 0)
64 ksft_exit_fail_msg("%s - Failed to write to file %s\n",
65 strerror(errno), filename);
66
67 ksft_exit_fail_msg("Failed to write to file %s\n", filename);
68}
69
70static void change_to_userns(void)
71{
72 int ret;
73 uid_t uid;
74 gid_t gid;
75 /* {g,u}id_map files only allow a max of 4096 bytes written to them */
76 char idmap[4096];
77
78 uid = getuid();
79 gid = getgid();
80
81 ret = unshare(CLONE_NEWUSER);
82 if (ret < 0)
83 ksft_exit_fail_msg("%s - Failed to unshare user namespace\n",
84 strerror(errno));
85
86 write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT);
87
88 ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid);
89 if (ret < 0 || (size_t)ret >= sizeof(idmap))
90 ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
91 strerror(errno));
92
93 write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0);
94
95 ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid);
96 if (ret < 0 || (size_t)ret >= sizeof(idmap))
97 ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
98 strerror(errno));
99
100 write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0);
101
102 ret = setgid(0);
103 if (ret)
104 ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
105 strerror(errno));
106
107 ret = setuid(0);
108 if (ret)
109 ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
110 strerror(errno));
111}
112
113static void change_to_mountns(void)
114{
115 int ret;
116
117 ret = unshare(CLONE_NEWNS);
118 if (ret < 0)
119 ksft_exit_fail_msg("%s - Failed to unshare mount namespace\n",
120 strerror(errno));
121
122 ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
123 if (ret < 0)
124 ksft_exit_fail_msg("%s - Failed to mount / as private\n",
125 strerror(errno));
126}
127
128static void rmdir_protect_errno(const char *dir)
129{
130 int saved_errno = errno;
131 (void)rmdir(dir);
132 errno = saved_errno;
133}
134
135static void __do_binderfs_test(void)
136{
137 int fd, ret, saved_errno;
138 size_t len;
139 ssize_t wret;
140 bool keep = false;
141 struct binderfs_device device = { 0 };
142 struct binder_version version = { 0 };
143
144 change_to_mountns();
145
146 ret = mkdir("/dev/binderfs", 0755);
147 if (ret < 0) {
148 if (errno != EEXIST)
149 ksft_exit_fail_msg(
150 "%s - Failed to create binderfs mountpoint\n",
151 strerror(errno));
152
153 keep = true;
154 }
155
156 ret = mount(NULL, "/dev/binderfs", "binder", 0, 0);
157 if (ret < 0) {
158 if (errno != ENODEV)
159 ksft_exit_fail_msg("%s - Failed to mount binderfs\n",
160 strerror(errno));
161
162 keep ? : rmdir_protect_errno("/dev/binderfs");
163 ksft_exit_skip(
164 "The Android binderfs filesystem is not available\n");
165 }
166
167 /* binderfs mount test passed */
168 ksft_inc_pass_cnt();
169
170 memcpy(device.name, "my-binder", strlen("my-binder"));
171
172 fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
173 if (fd < 0)
174 ksft_exit_fail_msg(
175 "%s - Failed to open binder-control device\n",
176 strerror(errno));
177
178 ret = ioctl(fd, BINDER_CTL_ADD, &device);
179 saved_errno = errno;
180 close(fd);
181 errno = saved_errno;
182 if (ret < 0) {
183 keep ? : rmdir_protect_errno("/dev/binderfs");
184 ksft_exit_fail_msg(
185 "%s - Failed to allocate new binder device\n",
186 strerror(errno));
187 }
188
189 ksft_print_msg(
190 "Allocated new binder device with major %d, minor %d, and name %s\n",
191 device.major, device.minor, device.name);
192
193 /* binder device allocation test passed */
194 ksft_inc_pass_cnt();
195
196 fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY);
197 if (fd < 0) {
198 keep ? : rmdir_protect_errno("/dev/binderfs");
199 ksft_exit_fail_msg("%s - Failed to open my-binder device\n",
200 strerror(errno));
201 }
202
203 ret = ioctl(fd, BINDER_VERSION, &version);
204 saved_errno = errno;
205 close(fd);
206 errno = saved_errno;
207 if (ret < 0) {
208 keep ? : rmdir_protect_errno("/dev/binderfs");
209 ksft_exit_fail_msg(
210 "%s - Failed to open perform BINDER_VERSION request\n",
211 strerror(errno));
212 }
213
214 ksft_print_msg("Detected binder version: %d\n",
215 version.protocol_version);
216
217 /* binder transaction with binderfs binder device passed */
218 ksft_inc_pass_cnt();
219
220 ret = unlink("/dev/binderfs/my-binder");
221 if (ret < 0) {
222 keep ? : rmdir_protect_errno("/dev/binderfs");
223 ksft_exit_fail_msg("%s - Failed to delete binder device\n",
224 strerror(errno));
225 }
226
227 /* binder device removal passed */
228 ksft_inc_pass_cnt();
229
230 ret = unlink("/dev/binderfs/binder-control");
231 if (!ret) {
232 keep ? : rmdir_protect_errno("/dev/binderfs");
233 ksft_exit_fail_msg("Managed to delete binder-control device\n");
234 } else if (errno != EPERM) {
235 keep ? : rmdir_protect_errno("/dev/binderfs");
236 ksft_exit_fail_msg(
237 "%s - Failed to delete binder-control device but exited with unexpected error code\n",
238 strerror(errno));
239 }
240
241 /* binder-control device removal failed as expected */
242 ksft_inc_xfail_cnt();
243
244on_error:
245 ret = umount2("/dev/binderfs", MNT_DETACH);
246 keep ?: rmdir_protect_errno("/dev/binderfs");
247 if (ret < 0)
248 ksft_exit_fail_msg("%s - Failed to unmount binderfs\n",
249 strerror(errno));
250
251 /* binderfs unmount test passed */
252 ksft_inc_pass_cnt();
253}
254
255static void binderfs_test_privileged()
256{
257 if (geteuid() != 0)
258 ksft_print_msg(
259 "Tests are not run as root. Skipping privileged tests\n");
260 else
261 __do_binderfs_test();
262}
263
264static void binderfs_test_unprivileged()
265{
266 change_to_userns();
267 __do_binderfs_test();
268}
269
270int main(int argc, char *argv[])
271{
272 binderfs_test_privileged();
273 binderfs_test_unprivileged();
274 ksft_exit_pass();
275}
diff --git a/tools/testing/selftests/filesystems/binderfs/config b/tools/testing/selftests/filesystems/binderfs/config
new file mode 100644
index 000000000000..02dd6cc9cf99
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/config
@@ -0,0 +1,3 @@
1CONFIG_ANDROID=y
2CONFIG_ANDROID_BINDERFS=y
3CONFIG_ANDROID_BINDER_IPC=y
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
index f8d468f54e98..aaa1e9f083c3 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -37,7 +37,7 @@ static int get_debugfs(char **path)
37 struct libmnt_table *tb; 37 struct libmnt_table *tb;
38 struct libmnt_iter *itr = NULL; 38 struct libmnt_iter *itr = NULL;
39 struct libmnt_fs *fs; 39 struct libmnt_fs *fs;
40 int found = 0; 40 int found = 0, ret;
41 41
42 cxt = mnt_new_context(); 42 cxt = mnt_new_context();
43 if (!cxt) 43 if (!cxt)
@@ -58,8 +58,11 @@ static int get_debugfs(char **path)
58 break; 58 break;
59 } 59 }
60 } 60 }
61 if (found) 61 if (found) {
62 asprintf(path, "%s/gpio", mnt_fs_get_target(fs)); 62 ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
63 if (ret < 0)
64 err(EXIT_FAILURE, "failed to format string");
65 }
63 66
64 mnt_free_iter(itr); 67 mnt_free_iter(itr);
65 mnt_free_context(cxt); 68 mnt_free_context(cxt);
diff --git a/tools/testing/selftests/ir/Makefile b/tools/testing/selftests/ir/Makefile
index f4ba8eb84b95..ad06489c22a5 100644
--- a/tools/testing/selftests/ir/Makefile
+++ b/tools/testing/selftests/ir/Makefile
@@ -1,5 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2TEST_PROGS := ir_loopback.sh 2TEST_PROGS := ir_loopback.sh
3TEST_GEN_PROGS_EXTENDED := ir_loopback 3TEST_GEN_PROGS_EXTENDED := ir_loopback
4APIDIR := ../../../include/uapi
5CFLAGS += -Wall -O2 -I$(APIDIR)
4 6
5include ../lib.mk 7include ../lib.mk
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 23022e9d32eb..b52cfdefecbf 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -571,7 +571,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
571 * already exist. 571 * already exist.
572 */ 572 */
573 region = (struct userspace_mem_region *) userspace_mem_region_find( 573 region = (struct userspace_mem_region *) userspace_mem_region_find(
574 vm, guest_paddr, guest_paddr + npages * vm->page_size); 574 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
575 if (region != NULL) 575 if (region != NULL)
576 TEST_ASSERT(false, "overlapping userspace_mem_region already " 576 TEST_ASSERT(false, "overlapping userspace_mem_region already "
577 "exists\n" 577 "exists\n"
@@ -587,15 +587,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
587 region = region->next) { 587 region = region->next) {
588 if (region->region.slot == slot) 588 if (region->region.slot == slot)
589 break; 589 break;
590 if ((guest_paddr <= (region->region.guest_phys_addr
591 + region->region.memory_size))
592 && ((guest_paddr + npages * vm->page_size)
593 >= region->region.guest_phys_addr))
594 break;
595 } 590 }
596 if (region != NULL) 591 if (region != NULL)
597 TEST_ASSERT(false, "A mem region with the requested slot " 592 TEST_ASSERT(false, "A mem region with the requested slot "
598 "or overlapping physical memory range already exists.\n" 593 "already exists.\n"
599 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" 594 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
600 " existing slot: %u paddr: 0x%lx size: 0x%lx", 595 " existing slot: %u paddr: 0x%lx size: 0x%lx",
601 slot, guest_paddr, npages, 596 slot, guest_paddr, npages,
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index ea3c73e8f4f6..c49c2a28b0eb 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -103,6 +103,12 @@ int main(int argc, char *argv[])
103 103
104 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 104 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
105 105
106 /* KVM should return supported EVMCS version range */
107 TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
108 (evmcs_ver & 0xff) > 0,
109 "Incorrect EVMCS version range: %x:%x\n",
110 evmcs_ver & 0xff, evmcs_ver >> 8);
111
106 run = vcpu_state(vm, VCPU_ID); 112 run = vcpu_state(vm, VCPU_ID);
107 113
108 vcpu_regs_get(vm, VCPU_ID, &regs1); 114 vcpu_regs_get(vm, VCPU_ID, &regs1);
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index f8f3e90700c0..1e6d14d2825c 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -21,6 +21,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
21KSFT_KHDR_INSTALL := 1 21KSFT_KHDR_INSTALL := 1
22include ../lib.mk 22include ../lib.mk
23 23
24$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma 24$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
25$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread 25$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
26$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread 26$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 802b4af18729..1080ff55a788 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -388,6 +388,7 @@ fib_carrier_unicast_test()
388 388
389 set -e 389 set -e
390 $IP link set dev dummy0 carrier off 390 $IP link set dev dummy0 carrier off
391 sleep 1
391 set +e 392 set +e
392 393
393 echo " Carrier down" 394 echo " Carrier down"
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
index d8313d0438b7..b90dff8d3a94 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
@@ -1,7 +1,7 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding" 4ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
5NUM_NETIFS=4 5NUM_NETIFS=4
6CHECK_TC="yes" 6CHECK_TC="yes"
7source lib.sh 7source lib.sh
@@ -96,6 +96,51 @@ flooding()
96 flood_test $swp2 $h1 $h2 96 flood_test $swp2 $h1 $h2
97} 97}
98 98
99vlan_deletion()
100{
101 # Test that the deletion of a VLAN on a bridge port does not affect
102 # the PVID VLAN
103 log_info "Add and delete a VLAN on bridge port $swp1"
104
105 bridge vlan add vid 10 dev $swp1
106 bridge vlan del vid 10 dev $swp1
107
108 ping_ipv4
109 ping_ipv6
110}
111
112extern_learn()
113{
114 local mac=de:ad:be:ef:13:37
115 local ageing_time
116
117 # Test that externally learned FDB entries can roam, but not age out
118 RET=0
119
120 bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
121
122 bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
123 check_err $? "Did not find FDB entry when should"
124
125 # Wait for 10 seconds after the ageing time to make sure the FDB entry
126 # was not aged out
127 ageing_time=$(bridge_ageing_time_get br0)
128 sleep $((ageing_time + 10))
129
130 bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
131 check_err $? "FDB entry was aged out when should not"
132
133 $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
134
135 bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
136 check_err $? "FDB entry did not roam when should"
137
138 log_test "Externally learned FDB entry - ageing & roaming"
139
140 bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
141 bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
142}
143
99trap cleanup EXIT 144trap cleanup EXIT
100 145
101setup_prepare 146setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
index 56cef3b1c194..bb10e33690b2 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -629,7 +629,7 @@ __test_ecn_decap()
629 RET=0 629 RET=0
630 630
631 tc filter add dev $h1 ingress pref 77 prot ip \ 631 tc filter add dev $h1 ingress pref 77 prot ip \
632 flower ip_tos $decapped_tos action pass 632 flower ip_tos $decapped_tos action drop
633 sleep 1 633 sleep 1
634 vxlan_encapped_ping_test v2 v1 192.0.2.17 \ 634 vxlan_encapped_ping_test v2 v1 192.0.2.17 \
635 $orig_inner_tos $orig_outer_tos \ 635 $orig_inner_tos $orig_outer_tos \
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
index 61ae2782388e..5d56cc0838f6 100644
--- a/tools/testing/selftests/net/ip_defrag.c
+++ b/tools/testing/selftests/net/ip_defrag.c
@@ -203,6 +203,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
203{ 203{
204 struct ip *iphdr = (struct ip *)ip_frame; 204 struct ip *iphdr = (struct ip *)ip_frame;
205 struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame; 205 struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
206 const bool ipv4 = !ipv6;
206 int res; 207 int res;
207 int offset; 208 int offset;
208 int frag_len; 209 int frag_len;
@@ -239,19 +240,53 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
239 iphdr->ip_sum = 0; 240 iphdr->ip_sum = 0;
240 } 241 }
241 242
243 /* Occasionally test in-order fragments. */
244 if (!cfg_overlap && (rand() % 100 < 15)) {
245 offset = 0;
246 while (offset < (UDP_HLEN + payload_len)) {
247 send_fragment(fd_raw, addr, alen, offset, ipv6);
248 offset += max_frag_len;
249 }
250 return;
251 }
252
253 /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
254 if (ipv4 && !cfg_overlap && (rand() % 100 < 20) &&
255 (payload_len > 9 * max_frag_len)) {
256 offset = 6 * max_frag_len;
257 while (offset < (UDP_HLEN + payload_len)) {
258 send_fragment(fd_raw, addr, alen, offset, ipv6);
259 offset += max_frag_len;
260 }
261 offset = 3 * max_frag_len;
262 while (offset < 6 * max_frag_len) {
263 send_fragment(fd_raw, addr, alen, offset, ipv6);
264 offset += max_frag_len;
265 }
266 offset = 0;
267 while (offset < 3 * max_frag_len) {
268 send_fragment(fd_raw, addr, alen, offset, ipv6);
269 offset += max_frag_len;
270 }
271 return;
272 }
273
242 /* Odd fragments. */ 274 /* Odd fragments. */
243 offset = max_frag_len; 275 offset = max_frag_len;
244 while (offset < (UDP_HLEN + payload_len)) { 276 while (offset < (UDP_HLEN + payload_len)) {
245 send_fragment(fd_raw, addr, alen, offset, ipv6); 277 send_fragment(fd_raw, addr, alen, offset, ipv6);
278 /* IPv4 ignores duplicates, so randomly send a duplicate. */
279 if (ipv4 && (1 == rand() % 100))
280 send_fragment(fd_raw, addr, alen, offset, ipv6);
246 offset += 2 * max_frag_len; 281 offset += 2 * max_frag_len;
247 } 282 }
248 283
249 if (cfg_overlap) { 284 if (cfg_overlap) {
250 /* Send an extra random fragment. */ 285 /* Send an extra random fragment. */
251 offset = rand() % (UDP_HLEN + payload_len - 1);
252 /* sendto() returns EINVAL if offset + frag_len is too small. */
253 if (ipv6) { 286 if (ipv6) {
254 struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); 287 struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
288 /* sendto() returns EINVAL if offset + frag_len is too small. */
289 offset = rand() % (UDP_HLEN + payload_len - 1);
255 frag_len = max_frag_len + rand() % 256; 290 frag_len = max_frag_len + rand() % 256;
256 /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */ 291 /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
257 frag_len &= ~0x7; 292 frag_len &= ~0x7;
@@ -259,13 +294,29 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
259 ip6hdr->ip6_plen = htons(frag_len); 294 ip6hdr->ip6_plen = htons(frag_len);
260 frag_len += IP6_HLEN; 295 frag_len += IP6_HLEN;
261 } else { 296 } else {
262 frag_len = IP4_HLEN + UDP_HLEN + rand() % 256; 297 /* In IPv4, duplicates and some fragments completely inside
298 * previously sent fragments are dropped/ignored. So
299 * random offset and frag_len can result in a dropped
300 * fragment instead of a dropped queue/packet. So we
301 * hard-code offset and frag_len.
302 *
303 * See ade446403bfb ("net: ipv4: do not handle duplicate
304 * fragments as overlapping").
305 */
306 if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
307 /* not enough payload to play with random offset and frag_len. */
308 offset = 8;
309 frag_len = IP4_HLEN + UDP_HLEN + max_frag_len;
310 } else {
311 offset = rand() % (payload_len / 2);
312 frag_len = 2 * max_frag_len + 1 + rand() % 256;
313 }
263 iphdr->ip_off = htons(offset / 8 | IP4_MF); 314 iphdr->ip_off = htons(offset / 8 | IP4_MF);
264 iphdr->ip_len = htons(frag_len); 315 iphdr->ip_len = htons(frag_len);
265 } 316 }
266 res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen); 317 res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
267 if (res < 0) 318 if (res < 0)
268 error(1, errno, "sendto overlap"); 319 error(1, errno, "sendto overlap: %d", frag_len);
269 if (res != frag_len) 320 if (res != frag_len)
270 error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len); 321 error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len);
271 frag_counter++; 322 frag_counter++;
@@ -275,6 +326,9 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
275 offset = 0; 326 offset = 0;
276 while (offset < (UDP_HLEN + payload_len)) { 327 while (offset < (UDP_HLEN + payload_len)) {
277 send_fragment(fd_raw, addr, alen, offset, ipv6); 328 send_fragment(fd_raw, addr, alen, offset, ipv6);
329 /* IPv4 ignores duplicates, so randomly send a duplicate. */
330 if (ipv4 && (1 == rand() % 100))
331 send_fragment(fd_raw, addr, alen, offset, ipv6);
278 offset += 2 * max_frag_len; 332 offset += 2 * max_frag_len;
279 } 333 }
280} 334}
@@ -282,7 +336,11 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
282static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) 336static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
283{ 337{
284 int fd_tx_raw, fd_rx_udp; 338 int fd_tx_raw, fd_rx_udp;
285 struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 }; 339 /* Frag queue timeout is set to one second in the calling script;
340 * socket timeout should be just a bit longer to avoid tests interfering
341 * with each other.
342 */
343 struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
286 int idx; 344 int idx;
287 int min_frag_len = ipv6 ? 1280 : 8; 345 int min_frag_len = ipv6 ? 1280 : 8;
288 346
@@ -308,12 +366,32 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
308 payload_len += (rand() % 4096)) { 366 payload_len += (rand() % 4096)) {
309 if (cfg_verbose) 367 if (cfg_verbose)
310 printf("payload_len: %d\n", payload_len); 368 printf("payload_len: %d\n", payload_len);
311 max_frag_len = min_frag_len; 369
312 do { 370 if (cfg_overlap) {
371 /* With overlaps, one send/receive pair below takes
372 * at least one second (== timeout) to run, so there
373 * is not enough test time to run a nested loop:
374 * the full overlap test takes 20-30 seconds.
375 */
376 max_frag_len = min_frag_len +
377 rand() % (1500 - FRAG_HLEN - min_frag_len);
313 send_udp_frags(fd_tx_raw, addr, alen, ipv6); 378 send_udp_frags(fd_tx_raw, addr, alen, ipv6);
314 recv_validate_udp(fd_rx_udp); 379 recv_validate_udp(fd_rx_udp);
315 max_frag_len += 8 * (rand() % 8); 380 } else {
316 } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len); 381 /* Without overlaps, each packet reassembly (== one
382 * send/receive pair below) takes very little time to
383 * run, so we can easily afford more thourough testing
384 * with a nested loop: the full non-overlap test takes
385 * less than one second).
386 */
387 max_frag_len = min_frag_len;
388 do {
389 send_udp_frags(fd_tx_raw, addr, alen, ipv6);
390 recv_validate_udp(fd_rx_udp);
391 max_frag_len += 8 * (rand() % 8);
392 } while (max_frag_len < (1500 - FRAG_HLEN) &&
393 max_frag_len <= payload_len);
394 }
317 } 395 }
318 396
319 /* Cleanup. */ 397 /* Cleanup. */
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
index f34672796044..7dd79a9efb17 100755
--- a/tools/testing/selftests/net/ip_defrag.sh
+++ b/tools/testing/selftests/net/ip_defrag.sh
@@ -11,10 +11,17 @@ readonly NETNS="ns-$(mktemp -u XXXXXX)"
11setup() { 11setup() {
12 ip netns add "${NETNS}" 12 ip netns add "${NETNS}"
13 ip -netns "${NETNS}" link set lo up 13 ip -netns "${NETNS}" link set lo up
14
14 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1 15 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1
15 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1 16 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1
17 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1
18
16 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1 19 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1
17 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1 20 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
21 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
22
23 # DST cache can get full with a lot of frags, with GC not keeping up with the test.
24 ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
18} 25}
19 26
20cleanup() { 27cleanup() {
@@ -27,7 +34,6 @@ setup
27echo "ipv4 defrag" 34echo "ipv4 defrag"
28ip netns exec "${NETNS}" ./ip_defrag -4 35ip netns exec "${NETNS}" ./ip_defrag -4
29 36
30
31echo "ipv4 defrag with overlaps" 37echo "ipv4 defrag with overlaps"
32ip netns exec "${NETNS}" ./ip_defrag -4o 38ip netns exec "${NETNS}" ./ip_defrag -4o
33 39
@@ -37,3 +43,4 @@ ip netns exec "${NETNS}" ./ip_defrag -6
37echo "ipv6 defrag with overlaps" 43echo "ipv6 defrag with overlaps"
38ip netns exec "${NETNS}" ./ip_defrag -6o 44ip netns exec "${NETNS}" ./ip_defrag -6o
39 45
46echo "all tests done"
diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh
index 8db35b99457c..71d7fdc513c1 100755
--- a/tools/testing/selftests/net/xfrm_policy.sh
+++ b/tools/testing/selftests/net/xfrm_policy.sh
@@ -28,6 +28,19 @@ KEY_AES=0x0123456789abcdef0123456789012345
28SPI1=0x1 28SPI1=0x1
29SPI2=0x2 29SPI2=0x2
30 30
31do_esp_policy() {
32 local ns=$1
33 local me=$2
34 local remote=$3
35 local lnet=$4
36 local rnet=$5
37
38 # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
39 ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow
40 # to fwd decrypted packets after esp processing:
41 ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow
42}
43
31do_esp() { 44do_esp() {
32 local ns=$1 45 local ns=$1
33 local me=$2 46 local me=$2
@@ -40,10 +53,59 @@ do_esp() {
40 ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet 53 ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet
41 ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet 54 ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
42 55
43 # to encrypt packets as they go out (includes forwarded packets that need encapsulation) 56 do_esp_policy $ns $me $remote $lnet $rnet
44 ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow 57}
45 # to fwd decrypted packets after esp processing: 58
46 ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow 59# add policies with different netmasks, to make sure kernel carries
60# the policies contained within new netmask over when search tree is
61# re-built.
62# peer netns that are supposed to be encapsulated via esp have addresses
63# in the 10.0.1.0/24 and 10.0.2.0/24 subnets, respectively.
64#
65# Adding a policy for '10.0.1.0/23' will make it necessary to
66# alter the prefix of 10.0.1.0 subnet.
67# In case new prefix overlaps with existing node, the node and all
68# policies it carries need to be merged with the existing one(s).
69#
70# Do that here.
71do_overlap()
72{
73 local ns=$1
74
75 # adds new nodes to tree (neither network exists yet in policy database).
76 ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block
77
78 # adds a new node in the 10.0.0.0/24 tree (dst node exists).
79 ip -net $ns xfrm policy add src 10.2.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block
80
81 # adds a 10.2.0.0/23 node, but for different dst.
82 ip -net $ns xfrm policy add src 10.2.0.0/23 dst 10.0.1.0/24 dir fwd priority 200 action block
83
84 # dst now overlaps with the 10.0.1.0/24 ESP policy in fwd.
85 # kernel must 'promote' existing one (10.0.0.0/24) to 10.0.0.0/23.
86 # But 10.0.0.0/23 also includes existing 10.0.1.0/24, so that node
87 # also has to be merged too, including source-sorted subtrees.
88 # old:
89 # 10.0.0.0/24 (node 1 in dst tree of the bin)
90 # 10.1.0.0/24 (node in src tree of dst node 1)
91 # 10.2.0.0/24 (node in src tree of dst node 1)
92 # 10.0.1.0/24 (node 2 in dst tree of the bin)
93 # 10.0.2.0/24 (node in src tree of dst node 2)
94 # 10.2.0.0/24 (node in src tree of dst node 2)
95 #
96 # The next 'policy add' adds dst '10.0.0.0/23', which means
97 # that dst node 1 and dst node 2 have to be merged including
98 # the sub-tree. As no duplicates are allowed, policies in
99 # the two '10.0.2.0/24' are also merged.
100 #
101 # after the 'add', internal search tree should look like this:
102 # 10.0.0.0/23 (node in dst tree of bin)
103 # 10.0.2.0/24 (node in src tree of dst node)
104 # 10.1.0.0/24 (node in src tree of dst node)
105 # 10.2.0.0/24 (node in src tree of dst node)
106 #
107 # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23.
108 ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block
47} 109}
48 110
49do_esp_policy_get_check() { 111do_esp_policy_get_check() {
@@ -160,6 +222,41 @@ check_xfrm() {
160 return $lret 222 return $lret
161} 223}
162 224
225check_exceptions()
226{
227 logpostfix="$1"
228 local lret=0
229
230 # ping to .254 should be excluded from the tunnel (exception is in place).
231 check_xfrm 0 254
232 if [ $? -ne 0 ]; then
233 echo "FAIL: expected ping to .254 to fail ($logpostfix)"
234 lret=1
235 else
236 echo "PASS: ping to .254 bypassed ipsec tunnel ($logpostfix)"
237 fi
238
239 # ping to .253 should use use ipsec due to direct policy exception.
240 check_xfrm 1 253
241 if [ $? -ne 0 ]; then
242 echo "FAIL: expected ping to .253 to use ipsec tunnel ($logpostfix)"
243 lret=1
244 else
245 echo "PASS: direct policy matches ($logpostfix)"
246 fi
247
248 # ping to .2 should use ipsec.
249 check_xfrm 1 2
250 if [ $? -ne 0 ]; then
251 echo "FAIL: expected ping to .2 to use ipsec tunnel ($logpostfix)"
252 lret=1
253 else
254 echo "PASS: policy matches ($logpostfix)"
255 fi
256
257 return $lret
258}
259
163#check for needed privileges 260#check for needed privileges
164if [ "$(id -u)" -ne 0 ];then 261if [ "$(id -u)" -ne 0 ];then
165 echo "SKIP: Need root privileges" 262 echo "SKIP: Need root privileges"
@@ -270,33 +367,45 @@ do_exception ns4 10.0.3.10 10.0.3.1 10.0.1.253 10.0.1.240/28
270do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96 367do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96
271do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96 368do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96
272 369
273# ping to .254 should now be excluded from the tunnel 370check_exceptions "exceptions"
274check_xfrm 0 254
275if [ $? -ne 0 ]; then 371if [ $? -ne 0 ]; then
276 echo "FAIL: expected ping to .254 to fail"
277 ret=1 372 ret=1
278else
279 echo "PASS: ping to .254 bypassed ipsec tunnel"
280fi 373fi
281 374
282# ping to .253 should use use ipsec due to direct policy exception. 375# insert block policies with adjacent/overlapping netmasks
283check_xfrm 1 253 376do_overlap ns3
284if [ $? -ne 0 ]; then
285 echo "FAIL: expected ping to .253 to use ipsec tunnel"
286 ret=1
287else
288 echo "PASS: direct policy matches"
289fi
290 377
291# ping to .2 should use ipsec. 378check_exceptions "exceptions and block policies"
292check_xfrm 1 2
293if [ $? -ne 0 ]; then 379if [ $? -ne 0 ]; then
294 echo "FAIL: expected ping to .2 to use ipsec tunnel"
295 ret=1 380 ret=1
296else
297 echo "PASS: policy matches"
298fi 381fi
299 382
383for n in ns3 ns4;do
384 ip -net $n xfrm policy set hthresh4 28 24 hthresh6 126 125
385 sleep $((RANDOM%5))
386done
387
388check_exceptions "exceptions and block policies after hresh changes"
389
390# full flush of policy db, check everything gets freed incl. internal meta data
391ip -net ns3 xfrm policy flush
392
393do_esp_policy ns3 10.0.3.1 10.0.3.10 10.0.1.0/24 10.0.2.0/24
394do_exception ns3 10.0.3.1 10.0.3.10 10.0.2.253 10.0.2.240/28
395
396# move inexact policies to hash table
397ip -net ns3 xfrm policy set hthresh4 16 16
398
399sleep $((RANDOM%5))
400check_exceptions "exceptions and block policies after hthresh change in ns3"
401
402# restore original hthresh settings -- move policies back to tables
403for n in ns3 ns4;do
404 ip -net $n xfrm policy set hthresh4 32 32 hthresh6 128 128
405 sleep $((RANDOM%5))
406done
407check_exceptions "exceptions and block policies after hresh change to normal"
408
300for i in 1 2 3 4;do ip netns del ns$i;done 409for i in 1 2 3 4;do ip netns del ns$i;done
301 410
302exit $ret 411exit $ret
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 47ed6cef93fb..c9ff2b47bd1c 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Makefile for netfilter selftests 2# Makefile for netfilter selftests
3 3
4TEST_PROGS := nft_trans_stress.sh 4TEST_PROGS := nft_trans_stress.sh nft_nat.sh
5 5
6include ../lib.mk 6include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
index 1017313e41a8..59caa8f71cd8 100644
--- a/tools/testing/selftests/netfilter/config
+++ b/tools/testing/selftests/netfilter/config
@@ -1,2 +1,2 @@
1CONFIG_NET_NS=y 1CONFIG_NET_NS=y
2NF_TABLES_INET=y 2CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
new file mode 100755
index 000000000000..8ec76681605c
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -0,0 +1,762 @@
1#!/bin/bash
2#
3# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
4#
5
6# Kselftest framework requirement - SKIP code is 4.
7ksft_skip=4
8ret=0
9
10nft --version > /dev/null 2>&1
11if [ $? -ne 0 ];then
12 echo "SKIP: Could not run test without nft tool"
13 exit $ksft_skip
14fi
15
16ip -Version > /dev/null 2>&1
17if [ $? -ne 0 ];then
18 echo "SKIP: Could not run test without ip tool"
19 exit $ksft_skip
20fi
21
22ip netns add ns0
23ip netns add ns1
24ip netns add ns2
25
26ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
27ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
28
29ip -net ns0 link set lo up
30ip -net ns0 link set veth0 up
31ip -net ns0 addr add 10.0.1.1/24 dev veth0
32ip -net ns0 addr add dead:1::1/64 dev veth0
33
34ip -net ns0 link set veth1 up
35ip -net ns0 addr add 10.0.2.1/24 dev veth1
36ip -net ns0 addr add dead:2::1/64 dev veth1
37
38for i in 1 2; do
39 ip -net ns$i link set lo up
40 ip -net ns$i link set eth0 up
41 ip -net ns$i addr add 10.0.$i.99/24 dev eth0
42 ip -net ns$i route add default via 10.0.$i.1
43 ip -net ns$i addr add dead:$i::99/64 dev eth0
44 ip -net ns$i route add default via dead:$i::1
45done
46
47bad_counter()
48{
49 local ns=$1
50 local counter=$2
51 local expect=$3
52
53 echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
54 ip netns exec $ns nft list counter inet filter $counter 1>&2
55}
56
57check_counters()
58{
59 ns=$1
60 local lret=0
61
62 cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
63 if [ $? -ne 0 ]; then
64 bad_counter $ns ns0in "packets 1 bytes 84"
65 lret=1
66 fi
67 cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
68 if [ $? -ne 0 ]; then
69 bad_counter $ns ns0out "packets 1 bytes 84"
70 lret=1
71 fi
72
73 expect="packets 1 bytes 104"
74 cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
75 if [ $? -ne 0 ]; then
76 bad_counter $ns ns0in6 "$expect"
77 lret=1
78 fi
79 cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
80 if [ $? -ne 0 ]; then
81 bad_counter $ns ns0out6 "$expect"
82 lret=1
83 fi
84
85 return $lret
86}
87
88check_ns0_counters()
89{
90 local ns=$1
91 local lret=0
92
93 cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
94 if [ $? -ne 0 ]; then
95 bad_counter ns0 ns0in "packets 0 bytes 0"
96 lret=1
97 fi
98
99 cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
100 if [ $? -ne 0 ]; then
101 bad_counter ns0 ns0in6 "packets 0 bytes 0"
102 lret=1
103 fi
104
105 cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
106 if [ $? -ne 0 ]; then
107 bad_counter ns0 ns0out "packets 0 bytes 0"
108 lret=1
109 fi
110 cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
111 if [ $? -ne 0 ]; then
112 bad_counter ns0 ns0out6 "packets 0 bytes 0"
113 lret=1
114 fi
115
116 for dir in "in" "out" ; do
117 expect="packets 1 bytes 84"
118 cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
119 if [ $? -ne 0 ]; then
120 bad_counter ns0 $ns$dir "$expect"
121 lret=1
122 fi
123
124 expect="packets 1 bytes 104"
125 cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
126 if [ $? -ne 0 ]; then
127 bad_counter ns0 $ns$dir6 "$expect"
128 lret=1
129 fi
130 done
131
132 return $lret
133}
134
135reset_counters()
136{
137 for i in 0 1 2;do
138 ip netns exec ns$i nft reset counters inet > /dev/null
139 done
140}
141
142test_local_dnat6()
143{
144 local lret=0
145ip netns exec ns0 nft -f - <<EOF
146table ip6 nat {
147 chain output {
148 type nat hook output priority 0; policy accept;
149 ip6 daddr dead:1::99 dnat to dead:2::99
150 }
151}
152EOF
153 if [ $? -ne 0 ]; then
154 echo "SKIP: Could not add add ip6 dnat hook"
155 return $ksft_skip
156 fi
157
158 # ping netns1, expect rewrite to netns2
159 ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
160 if [ $? -ne 0 ]; then
161 lret=1
162 echo "ERROR: ping6 failed"
163 return $lret
164 fi
165
166 expect="packets 0 bytes 0"
167 for dir in "in6" "out6" ; do
168 cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
169 if [ $? -ne 0 ]; then
170 bad_counter ns0 ns1$dir "$expect"
171 lret=1
172 fi
173 done
174
175 expect="packets 1 bytes 104"
176 for dir in "in6" "out6" ; do
177 cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
178 if [ $? -ne 0 ]; then
179 bad_counter ns0 ns2$dir "$expect"
180 lret=1
181 fi
182 done
183
184 # expect 0 count in ns1
185 expect="packets 0 bytes 0"
186 for dir in "in6" "out6" ; do
187 cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
188 if [ $? -ne 0 ]; then
189 bad_counter ns1 ns0$dir "$expect"
190 lret=1
191 fi
192 done
193
194 # expect 1 packet in ns2
195 expect="packets 1 bytes 104"
196 for dir in "in6" "out6" ; do
197 cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
198 if [ $? -ne 0 ]; then
199 bad_counter ns2 ns0$dir "$expect"
200 lret=1
201 fi
202 done
203
204 test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
205 ip netns exec ns0 nft flush chain ip6 nat output
206
207 return $lret
208}
209
210test_local_dnat()
211{
212 local lret=0
213ip netns exec ns0 nft -f - <<EOF
214table ip nat {
215 chain output {
216 type nat hook output priority 0; policy accept;
217 ip daddr 10.0.1.99 dnat to 10.0.2.99
218 }
219}
220EOF
221 # ping netns1, expect rewrite to netns2
222 ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
223 if [ $? -ne 0 ]; then
224 lret=1
225 echo "ERROR: ping failed"
226 return $lret
227 fi
228
229 expect="packets 0 bytes 0"
230 for dir in "in" "out" ; do
231 cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
232 if [ $? -ne 0 ]; then
233 bad_counter ns0 ns1$dir "$expect"
234 lret=1
235 fi
236 done
237
238 expect="packets 1 bytes 84"
239 for dir in "in" "out" ; do
240 cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
241 if [ $? -ne 0 ]; then
242 bad_counter ns0 ns2$dir "$expect"
243 lret=1
244 fi
245 done
246
247 # expect 0 count in ns1
248 expect="packets 0 bytes 0"
249 for dir in "in" "out" ; do
250 cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
251 if [ $? -ne 0 ]; then
252 bad_counter ns1 ns0$dir "$expect"
253 lret=1
254 fi
255 done
256
257 # expect 1 packet in ns2
258 expect="packets 1 bytes 84"
259 for dir in "in" "out" ; do
260 cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
261 if [ $? -ne 0 ]; then
262 bad_counter ns2 ns0$dir "$expect"
263 lret=1
264 fi
265 done
266
267 test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
268
269 ip netns exec ns0 nft flush chain ip nat output
270
271 reset_counters
272 ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
273 if [ $? -ne 0 ]; then
274 lret=1
275 echo "ERROR: ping failed"
276 return $lret
277 fi
278
279 expect="packets 1 bytes 84"
280 for dir in "in" "out" ; do
281 cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
282 if [ $? -ne 0 ]; then
283 bad_counter ns1 ns1$dir "$expect"
284 lret=1
285 fi
286 done
287 expect="packets 0 bytes 0"
288 for dir in "in" "out" ; do
289 cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
290 if [ $? -ne 0 ]; then
291 bad_counter ns0 ns2$dir "$expect"
292 lret=1
293 fi
294 done
295
296 # expect 1 count in ns1
297 expect="packets 1 bytes 84"
298 for dir in "in" "out" ; do
299 cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
300 if [ $? -ne 0 ]; then
301 bad_counter ns0 ns0$dir "$expect"
302 lret=1
303 fi
304 done
305
306 # expect 0 packet in ns2
307 expect="packets 0 bytes 0"
308 for dir in "in" "out" ; do
309 cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
310 if [ $? -ne 0 ]; then
311 bad_counter ns2 ns2$dir "$expect"
312 lret=1
313 fi
314 done
315
316 test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
317
318 return $lret
319}
320
321
322test_masquerade6()
323{
324 local lret=0
325
326 ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
327
328 ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
329 if [ $? -ne 0 ] ; then
330 echo "ERROR: cannot ping ns1 from ns2 via ipv6"
331 return 1
332 lret=1
333 fi
334
335 expect="packets 1 bytes 104"
336 for dir in "in6" "out6" ; do
337 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
338 if [ $? -ne 0 ]; then
339 bad_counter ns1 ns2$dir "$expect"
340 lret=1
341 fi
342
343 cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
344 if [ $? -ne 0 ]; then
345 bad_counter ns2 ns1$dir "$expect"
346 lret=1
347 fi
348 done
349
350 reset_counters
351
352# add masquerading rule
353ip netns exec ns0 nft -f - <<EOF
354table ip6 nat {
355 chain postrouting {
356 type nat hook postrouting priority 0; policy accept;
357 meta oif veth0 masquerade
358 }
359}
360EOF
361 ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
362 if [ $? -ne 0 ] ; then
363 echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
364 lret=1
365 fi
366
367 # ns1 should have seen packets from ns0, due to masquerade
368 expect="packets 1 bytes 104"
369 for dir in "in6" "out6" ; do
370
371 cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
372 if [ $? -ne 0 ]; then
373 bad_counter ns1 ns0$dir "$expect"
374 lret=1
375 fi
376
377 cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
378 if [ $? -ne 0 ]; then
379 bad_counter ns2 ns1$dir "$expect"
380 lret=1
381 fi
382 done
383
384 # ns1 should not have seen packets from ns2, due to masquerade
385 expect="packets 0 bytes 0"
386 for dir in "in6" "out6" ; do
387 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
388 if [ $? -ne 0 ]; then
389 bad_counter ns1 ns0$dir "$expect"
390 lret=1
391 fi
392
393 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
394 if [ $? -ne 0 ]; then
395 bad_counter ns2 ns1$dir "$expect"
396 lret=1
397 fi
398 done
399
400 ip netns exec ns0 nft flush chain ip6 nat postrouting
401 if [ $? -ne 0 ]; then
402 echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
403 lret=1
404 fi
405
406 test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
407
408 return $lret
409}
410
411test_masquerade()
412{
413 local lret=0
414
415 ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
416 ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
417
418 ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
419 if [ $? -ne 0 ] ; then
420 echo "ERROR: canot ping ns1 from ns2"
421 lret=1
422 fi
423
424 expect="packets 1 bytes 84"
425 for dir in "in" "out" ; do
426 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
427 if [ $? -ne 0 ]; then
428 bad_counter ns1 ns2$dir "$expect"
429 lret=1
430 fi
431
432 cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
433 if [ $? -ne 0 ]; then
434 bad_counter ns2 ns1$dir "$expect"
435 lret=1
436 fi
437 done
438
439 reset_counters
440
441# add masquerading rule
442ip netns exec ns0 nft -f - <<EOF
443table ip nat {
444 chain postrouting {
445 type nat hook postrouting priority 0; policy accept;
446 meta oif veth0 masquerade
447 }
448}
449EOF
450 ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
451 if [ $? -ne 0 ] ; then
452 echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
453 lret=1
454 fi
455
456 # ns1 should have seen packets from ns0, due to masquerade
457 expect="packets 1 bytes 84"
458 for dir in "in" "out" ; do
459 cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
460 if [ $? -ne 0 ]; then
461 bad_counter ns1 ns0$dir "$expect"
462 lret=1
463 fi
464
465 cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
466 if [ $? -ne 0 ]; then
467 bad_counter ns2 ns1$dir "$expect"
468 lret=1
469 fi
470 done
471
472 # ns1 should not have seen packets from ns2, due to masquerade
473 expect="packets 0 bytes 0"
474 for dir in "in" "out" ; do
475 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
476 if [ $? -ne 0 ]; then
477 bad_counter ns1 ns0$dir "$expect"
478 lret=1
479 fi
480
481 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
482 if [ $? -ne 0 ]; then
483 bad_counter ns2 ns1$dir "$expect"
484 lret=1
485 fi
486 done
487
488 ip netns exec ns0 nft flush chain ip nat postrouting
489 if [ $? -ne 0 ]; then
490 echo "ERROR: Could not flush nat postrouting" 1>&2
491 lret=1
492 fi
493
494 test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
495
496 return $lret
497}
498
499test_redirect6()
500{
501 local lret=0
502
503 ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
504
505 ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
506 if [ $? -ne 0 ] ; then
507 echo "ERROR: cannnot ping ns1 from ns2 via ipv6"
508 lret=1
509 fi
510
511 expect="packets 1 bytes 104"
512 for dir in "in6" "out6" ; do
513 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
514 if [ $? -ne 0 ]; then
515 bad_counter ns1 ns2$dir "$expect"
516 lret=1
517 fi
518
519 cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
520 if [ $? -ne 0 ]; then
521 bad_counter ns2 ns1$dir "$expect"
522 lret=1
523 fi
524 done
525
526 reset_counters
527
528# add redirect rule
529ip netns exec ns0 nft -f - <<EOF
530table ip6 nat {
531 chain prerouting {
532 type nat hook prerouting priority 0; policy accept;
533 meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
534 }
535}
536EOF
537 ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
538 if [ $? -ne 0 ] ; then
539 echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
540 lret=1
541 fi
542
543 # ns1 should have seen no packets from ns2, due to redirection
544 expect="packets 0 bytes 0"
545 for dir in "in6" "out6" ; do
546 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
547 if [ $? -ne 0 ]; then
548 bad_counter ns1 ns0$dir "$expect"
549 lret=1
550 fi
551 done
552
553 # ns0 should have seen packets from ns2, due to masquerade
554 expect="packets 1 bytes 104"
555 for dir in "in6" "out6" ; do
556 cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
557 if [ $? -ne 0 ]; then
558 bad_counter ns1 ns0$dir "$expect"
559 lret=1
560 fi
561 done
562
563 ip netns exec ns0 nft delete table ip6 nat
564 if [ $? -ne 0 ]; then
565 echo "ERROR: Could not delete ip6 nat table" 1>&2
566 lret=1
567 fi
568
569 test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
570
571 return $lret
572}
573
574test_redirect()
575{
576 local lret=0
577
578 ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
579 ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
580
581 ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
582 if [ $? -ne 0 ] ; then
583 echo "ERROR: cannot ping ns1 from ns2"
584 lret=1
585 fi
586
587 expect="packets 1 bytes 84"
588 for dir in "in" "out" ; do
589 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
590 if [ $? -ne 0 ]; then
591 bad_counter ns1 ns2$dir "$expect"
592 lret=1
593 fi
594
595 cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
596 if [ $? -ne 0 ]; then
597 bad_counter ns2 ns1$dir "$expect"
598 lret=1
599 fi
600 done
601
602 reset_counters
603
604# add redirect rule
605ip netns exec ns0 nft -f - <<EOF
606table ip nat {
607 chain prerouting {
608 type nat hook prerouting priority 0; policy accept;
609 meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
610 }
611}
612EOF
613 ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
614 if [ $? -ne 0 ] ; then
615 echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
616 lret=1
617 fi
618
619 # ns1 should have seen no packets from ns2, due to redirection
620 expect="packets 0 bytes 0"
621 for dir in "in" "out" ; do
622
623 cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
624 if [ $? -ne 0 ]; then
625 bad_counter ns1 ns0$dir "$expect"
626 lret=1
627 fi
628 done
629
630 # ns0 should have seen packets from ns2, due to masquerade
631 expect="packets 1 bytes 84"
632 for dir in "in" "out" ; do
633 cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
634 if [ $? -ne 0 ]; then
635 bad_counter ns1 ns0$dir "$expect"
636 lret=1
637 fi
638 done
639
640 ip netns exec ns0 nft delete table ip nat
641 if [ $? -ne 0 ]; then
642 echo "ERROR: Could not delete nat table" 1>&2
643 lret=1
644 fi
645
646 test $lret -eq 0 && echo "PASS: IP redirection for ns2"
647
648 return $lret
649}
650
651
652# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
653for i in 0 1 2; do
654ip netns exec ns$i nft -f - <<EOF
655table inet filter {
656 counter ns0in {}
657 counter ns1in {}
658 counter ns2in {}
659
660 counter ns0out {}
661 counter ns1out {}
662 counter ns2out {}
663
664 counter ns0in6 {}
665 counter ns1in6 {}
666 counter ns2in6 {}
667
668 counter ns0out6 {}
669 counter ns1out6 {}
670 counter ns2out6 {}
671
672 map nsincounter {
673 type ipv4_addr : counter
674 elements = { 10.0.1.1 : "ns0in",
675 10.0.2.1 : "ns0in",
676 10.0.1.99 : "ns1in",
677 10.0.2.99 : "ns2in" }
678 }
679
680 map nsincounter6 {
681 type ipv6_addr : counter
682 elements = { dead:1::1 : "ns0in6",
683 dead:2::1 : "ns0in6",
684 dead:1::99 : "ns1in6",
685 dead:2::99 : "ns2in6" }
686 }
687
688 map nsoutcounter {
689 type ipv4_addr : counter
690 elements = { 10.0.1.1 : "ns0out",
691 10.0.2.1 : "ns0out",
692 10.0.1.99: "ns1out",
693 10.0.2.99: "ns2out" }
694 }
695
696 map nsoutcounter6 {
697 type ipv6_addr : counter
698 elements = { dead:1::1 : "ns0out6",
699 dead:2::1 : "ns0out6",
700 dead:1::99 : "ns1out6",
701 dead:2::99 : "ns2out6" }
702 }
703
704 chain input {
705 type filter hook input priority 0; policy accept;
706 counter name ip saddr map @nsincounter
707 icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
708 }
709 chain output {
710 type filter hook output priority 0; policy accept;
711 counter name ip daddr map @nsoutcounter
712 icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
713 }
714}
715EOF
716done
717
718sleep 3
719# test basic connectivity
720for i in 1 2; do
721 ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
722 if [ $? -ne 0 ];then
723 echo "ERROR: Could not reach other namespace(s)" 1>&2
724 ret=1
725 fi
726
727 ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
728 if [ $? -ne 0 ];then
729 echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
730 ret=1
731 fi
732 check_counters ns$i
733 if [ $? -ne 0 ]; then
734 ret=1
735 fi
736
737 check_ns0_counters ns$i
738 if [ $? -ne 0 ]; then
739 ret=1
740 fi
741 reset_counters
742done
743
744if [ $ret -eq 0 ];then
745 echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
746fi
747
748reset_counters
749test_local_dnat
750test_local_dnat6
751
752reset_counters
753test_masquerade
754test_masquerade6
755
756reset_counters
757test_redirect
758test_redirect6
759
760for i in 0 1 2; do ip netns del ns$i;done
761
762exit $ret
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index 9050eeea5f5f..1de8bd8ccf5d 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -9,6 +9,3 @@ all: $(TEST_PROGS)
9top_srcdir = ../../../../.. 9top_srcdir = ../../../../..
10KSFT_KHDR_INSTALL := 1 10KSFT_KHDR_INSTALL := 1
11include ../../lib.mk 11include ../../lib.mk
12
13clean:
14 rm -fr $(TEST_GEN_FILES)
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c
index 2e563d17cf0c..d1bbafb16f47 100644
--- a/tools/testing/selftests/networking/timestamping/txtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c
@@ -240,7 +240,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
240 cm->cmsg_type == IP_RECVERR) || 240 cm->cmsg_type == IP_RECVERR) ||
241 (cm->cmsg_level == SOL_IPV6 && 241 (cm->cmsg_level == SOL_IPV6 &&
242 cm->cmsg_type == IPV6_RECVERR) || 242 cm->cmsg_type == IPV6_RECVERR) ||
243 (cm->cmsg_level = SOL_PACKET && 243 (cm->cmsg_level == SOL_PACKET &&
244 cm->cmsg_type == PACKET_TX_TIMESTAMP)) { 244 cm->cmsg_type == PACKET_TX_TIMESTAMP)) {
245 serr = (void *) CMSG_DATA(cm); 245 serr = (void *) CMSG_DATA(cm);
246 if (serr->ee_errno != ENOMSG || 246 if (serr->ee_errno != ENOMSG ||
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 82121a81681f..29bac5ef9a93 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -10,4 +10,5 @@
10/proc-uptime-002 10/proc-uptime-002
11/read 11/read
12/self 12/self
13/setns-dcache
13/thread-self 14/thread-self
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index 1c12c34cf85d..434d033ee067 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001
14TEST_GEN_PROGS += proc-uptime-002 14TEST_GEN_PROGS += proc-uptime-002
15TEST_GEN_PROGS += read 15TEST_GEN_PROGS += read
16TEST_GEN_PROGS += self 16TEST_GEN_PROGS += self
17TEST_GEN_PROGS += setns-dcache
17TEST_GEN_PROGS += thread-self 18TEST_GEN_PROGS += thread-self
18 19
19include ../lib.mk 20include ../lib.mk
diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
new file mode 100644
index 000000000000..60ab197a73fc
--- /dev/null
+++ b/tools/testing/selftests/proc/setns-dcache.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16/*
17 * Test that setns(CLONE_NEWNET) points to new /proc/net content even
18 * if old one is in dcache.
19 *
20 * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
21 */
22#undef NDEBUG
23#include <assert.h>
24#include <errno.h>
25#include <sched.h>
26#include <signal.h>
27#include <stdio.h>
28#include <stdlib.h>
29#include <string.h>
30#include <unistd.h>
31#include <sys/types.h>
32#include <sys/stat.h>
33#include <fcntl.h>
34#include <sys/socket.h>
35
36static pid_t pid = -1;
37
38static void f(void)
39{
40 if (pid > 0) {
41 kill(pid, SIGTERM);
42 }
43}
44
45int main(void)
46{
47 int fd[2];
48 char _ = 0;
49 int nsfd;
50
51 atexit(f);
52
53 /* Check for priviledges and syscall availability straight away. */
54 if (unshare(CLONE_NEWNET) == -1) {
55 if (errno == ENOSYS || errno == EPERM) {
56 return 4;
57 }
58 return 1;
59 }
60 /* Distinguisher between two otherwise empty net namespaces. */
61 if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
62 return 1;
63 }
64
65 if (pipe(fd) == -1) {
66 return 1;
67 }
68
69 pid = fork();
70 if (pid == -1) {
71 return 1;
72 }
73
74 if (pid == 0) {
75 if (unshare(CLONE_NEWNET) == -1) {
76 return 1;
77 }
78
79 if (write(fd[1], &_, 1) != 1) {
80 return 1;
81 }
82
83 pause();
84
85 return 0;
86 }
87
88 if (read(fd[0], &_, 1) != 1) {
89 return 1;
90 }
91
92 {
93 char buf[64];
94 snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
95 nsfd = open(buf, O_RDONLY);
96 if (nsfd == -1) {
97 return 1;
98 }
99 }
100
101 /* Reliably pin dentry into dcache. */
102 (void)open("/proc/net/unix", O_RDONLY);
103
104 if (setns(nsfd, CLONE_NEWNET) == -1) {
105 return 1;
106 }
107
108 kill(pid, SIGTERM);
109 pid = 0;
110
111 {
112 char buf[4096];
113 ssize_t rv;
114 int fd;
115
116 fd = open("/proc/net/unix", O_RDONLY);
117 if (fd == -1) {
118 return 1;
119 }
120
121#define S "Num RefCount Protocol Flags Type St Inode Path\n"
122 rv = read(fd, buf, sizeof(buf));
123
124 assert(rv == strlen(S));
125 assert(memcmp(buf, S, strlen(S)) == 0);
126 }
127
128 return 0;
129}
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index e20b017e7073..b2065536d407 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -145,15 +145,12 @@ TEST_F(rtc, alarm_alm_set) {
145 145
146 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); 146 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
147 ASSERT_NE(-1, rc); 147 ASSERT_NE(-1, rc);
148 EXPECT_NE(0, rc); 148 ASSERT_NE(0, rc);
149 149
150 /* Disable alarm interrupts */ 150 /* Disable alarm interrupts */
151 rc = ioctl(self->fd, RTC_AIE_OFF, 0); 151 rc = ioctl(self->fd, RTC_AIE_OFF, 0);
152 ASSERT_NE(-1, rc); 152 ASSERT_NE(-1, rc);
153 153
154 if (rc == 0)
155 return;
156
157 rc = read(self->fd, &data, sizeof(unsigned long)); 154 rc = read(self->fd, &data, sizeof(unsigned long));
158 ASSERT_NE(-1, rc); 155 ASSERT_NE(-1, rc);
159 TH_LOG("data: %lx", data); 156 TH_LOG("data: %lx", data);
@@ -202,7 +199,109 @@ TEST_F(rtc, alarm_wkalm_set) {
202 199
203 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); 200 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
204 ASSERT_NE(-1, rc); 201 ASSERT_NE(-1, rc);
205 EXPECT_NE(0, rc); 202 ASSERT_NE(0, rc);
203
204 rc = read(self->fd, &data, sizeof(unsigned long));
205 ASSERT_NE(-1, rc);
206
207 rc = ioctl(self->fd, RTC_RD_TIME, &tm);
208 ASSERT_NE(-1, rc);
209
210 new = timegm((struct tm *)&tm);
211 ASSERT_EQ(new, secs);
212}
213
214TEST_F(rtc, alarm_alm_set_minute) {
215 struct timeval tv = { .tv_sec = 62 };
216 unsigned long data;
217 struct rtc_time tm;
218 fd_set readfds;
219 time_t secs, new;
220 int rc;
221
222 rc = ioctl(self->fd, RTC_RD_TIME, &tm);
223 ASSERT_NE(-1, rc);
224
225 secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec;
226 gmtime_r(&secs, (struct tm *)&tm);
227
228 rc = ioctl(self->fd, RTC_ALM_SET, &tm);
229 if (rc == -1) {
230 ASSERT_EQ(EINVAL, errno);
231 TH_LOG("skip alarms are not supported.");
232 return;
233 }
234
235 rc = ioctl(self->fd, RTC_ALM_READ, &tm);
236 ASSERT_NE(-1, rc);
237
238 TH_LOG("Alarm time now set to %02d:%02d:%02d.",
239 tm.tm_hour, tm.tm_min, tm.tm_sec);
240
241 /* Enable alarm interrupts */
242 rc = ioctl(self->fd, RTC_AIE_ON, 0);
243 ASSERT_NE(-1, rc);
244
245 FD_ZERO(&readfds);
246 FD_SET(self->fd, &readfds);
247
248 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
249 ASSERT_NE(-1, rc);
250 ASSERT_NE(0, rc);
251
252 /* Disable alarm interrupts */
253 rc = ioctl(self->fd, RTC_AIE_OFF, 0);
254 ASSERT_NE(-1, rc);
255
256 rc = read(self->fd, &data, sizeof(unsigned long));
257 ASSERT_NE(-1, rc);
258 TH_LOG("data: %lx", data);
259
260 rc = ioctl(self->fd, RTC_RD_TIME, &tm);
261 ASSERT_NE(-1, rc);
262
263 new = timegm((struct tm *)&tm);
264 ASSERT_EQ(new, secs);
265}
266
267TEST_F(rtc, alarm_wkalm_set_minute) {
268 struct timeval tv = { .tv_sec = 62 };
269 struct rtc_wkalrm alarm = { 0 };
270 struct rtc_time tm;
271 unsigned long data;
272 fd_set readfds;
273 time_t secs, new;
274 int rc;
275
276 rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
277 ASSERT_NE(-1, rc);
278
279 secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec;
280 gmtime_r(&secs, (struct tm *)&alarm.time);
281
282 alarm.enabled = 1;
283
284 rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
285 if (rc == -1) {
286 ASSERT_EQ(EINVAL, errno);
287 TH_LOG("skip alarms are not supported.");
288 return;
289 }
290
291 rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
292 ASSERT_NE(-1, rc);
293
294 TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
295 alarm.time.tm_mday, alarm.time.tm_mon + 1,
296 alarm.time.tm_year + 1900, alarm.time.tm_hour,
297 alarm.time.tm_min, alarm.time.tm_sec);
298
299 FD_ZERO(&readfds);
300 FD_SET(self->fd, &readfds);
301
302 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
303 ASSERT_NE(-1, rc);
304 ASSERT_NE(0, rc);
206 305
207 rc = read(self->fd, &data, sizeof(unsigned long)); 306 rc = read(self->fd, &data, sizeof(unsigned long));
208 ASSERT_NE(-1, rc); 307 ASSERT_NE(-1, rc);
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index fce7f4ce0692..1760b3e39730 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -9,7 +9,7 @@ BINARIES := seccomp_bpf seccomp_benchmark
9CFLAGS += -Wl,-no-as-needed -Wall 9CFLAGS += -Wl,-no-as-needed -Wall
10 10
11seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h 11seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
12 $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@ 12 $(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
13 13
14TEST_PROGS += $(BINARIES) 14TEST_PROGS += $(BINARIES)
15EXTRA_CLEAN := $(BINARIES) 15EXTRA_CLEAN := $(BINARIES)
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 067cb4607d6c..7e632b465ab4 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1608,7 +1608,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
1608#ifdef SYSCALL_NUM_RET_SHARE_REG 1608#ifdef SYSCALL_NUM_RET_SHARE_REG
1609# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action) 1609# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
1610#else 1610#else
1611# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action) 1611# define EXPECT_SYSCALL_RETURN(val, action) \
1612 do { \
1613 errno = 0; \
1614 if (val < 0) { \
1615 EXPECT_EQ(-1, action); \
1616 EXPECT_EQ(-(val), errno); \
1617 } else { \
1618 EXPECT_EQ(val, action); \
1619 } \
1620 } while (0)
1612#endif 1621#endif
1613 1622
1614/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for 1623/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
@@ -1647,7 +1656,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
1647 1656
1648/* Architecture-specific syscall changing routine. */ 1657/* Architecture-specific syscall changing routine. */
1649void change_syscall(struct __test_metadata *_metadata, 1658void change_syscall(struct __test_metadata *_metadata,
1650 pid_t tracee, int syscall) 1659 pid_t tracee, int syscall, int result)
1651{ 1660{
1652 int ret; 1661 int ret;
1653 ARCH_REGS regs; 1662 ARCH_REGS regs;
@@ -1706,7 +1715,7 @@ void change_syscall(struct __test_metadata *_metadata,
1706#ifdef SYSCALL_NUM_RET_SHARE_REG 1715#ifdef SYSCALL_NUM_RET_SHARE_REG
1707 TH_LOG("Can't modify syscall return on this architecture"); 1716 TH_LOG("Can't modify syscall return on this architecture");
1708#else 1717#else
1709 regs.SYSCALL_RET = EPERM; 1718 regs.SYSCALL_RET = result;
1710#endif 1719#endif
1711 1720
1712#ifdef HAVE_GETREGS 1721#ifdef HAVE_GETREGS
@@ -1734,14 +1743,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
1734 case 0x1002: 1743 case 0x1002:
1735 /* change getpid to getppid. */ 1744 /* change getpid to getppid. */
1736 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); 1745 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
1737 change_syscall(_metadata, tracee, __NR_getppid); 1746 change_syscall(_metadata, tracee, __NR_getppid, 0);
1738 break; 1747 break;
1739 case 0x1003: 1748 case 0x1003:
1740 /* skip gettid. */ 1749 /* skip gettid with valid return code. */
1741 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); 1750 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
1742 change_syscall(_metadata, tracee, -1); 1751 change_syscall(_metadata, tracee, -1, 45000);
1743 break; 1752 break;
1744 case 0x1004: 1753 case 0x1004:
1754 /* skip openat with error. */
1755 EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
1756 change_syscall(_metadata, tracee, -1, -ESRCH);
1757 break;
1758 case 0x1005:
1745 /* do nothing (allow getppid) */ 1759 /* do nothing (allow getppid) */
1746 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); 1760 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
1747 break; 1761 break;
@@ -1774,9 +1788,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
1774 nr = get_syscall(_metadata, tracee); 1788 nr = get_syscall(_metadata, tracee);
1775 1789
1776 if (nr == __NR_getpid) 1790 if (nr == __NR_getpid)
1777 change_syscall(_metadata, tracee, __NR_getppid); 1791 change_syscall(_metadata, tracee, __NR_getppid, 0);
1792 if (nr == __NR_gettid)
1793 change_syscall(_metadata, tracee, -1, 45000);
1778 if (nr == __NR_openat) 1794 if (nr == __NR_openat)
1779 change_syscall(_metadata, tracee, -1); 1795 change_syscall(_metadata, tracee, -1, -ESRCH);
1780} 1796}
1781 1797
1782FIXTURE_DATA(TRACE_syscall) { 1798FIXTURE_DATA(TRACE_syscall) {
@@ -1793,8 +1809,10 @@ FIXTURE_SETUP(TRACE_syscall)
1793 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), 1809 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
1794 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), 1810 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
1795 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), 1811 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
1796 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 1812 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
1797 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), 1813 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
1814 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
1815 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
1798 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1816 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1799 }; 1817 };
1800 1818
@@ -1842,15 +1860,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
1842 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 1860 EXPECT_NE(self->mypid, syscall(__NR_getpid));
1843} 1861}
1844 1862
1845TEST_F(TRACE_syscall, ptrace_syscall_dropped) 1863TEST_F(TRACE_syscall, ptrace_syscall_errno)
1864{
1865 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
1866 teardown_trace_fixture(_metadata, self->tracer);
1867 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
1868 true);
1869
1870 /* Tracer should skip the open syscall, resulting in ESRCH. */
1871 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
1872}
1873
1874TEST_F(TRACE_syscall, ptrace_syscall_faked)
1846{ 1875{
1847 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ 1876 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
1848 teardown_trace_fixture(_metadata, self->tracer); 1877 teardown_trace_fixture(_metadata, self->tracer);
1849 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, 1878 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
1850 true); 1879 true);
1851 1880
1852 /* Tracer should skip the open syscall, resulting in EPERM. */ 1881 /* Tracer should skip the gettid syscall, resulting fake pid. */
1853 EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat)); 1882 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
1854} 1883}
1855 1884
1856TEST_F(TRACE_syscall, syscall_allowed) 1885TEST_F(TRACE_syscall, syscall_allowed)
@@ -1883,7 +1912,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
1883 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 1912 EXPECT_NE(self->mypid, syscall(__NR_getpid));
1884} 1913}
1885 1914
1886TEST_F(TRACE_syscall, syscall_dropped) 1915TEST_F(TRACE_syscall, syscall_errno)
1916{
1917 long ret;
1918
1919 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1920 ASSERT_EQ(0, ret);
1921
1922 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1923 ASSERT_EQ(0, ret);
1924
1925 /* openat has been skipped and an errno return. */
1926 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
1927}
1928
1929TEST_F(TRACE_syscall, syscall_faked)
1887{ 1930{
1888 long ret; 1931 long ret;
1889 1932
@@ -1894,8 +1937,7 @@ TEST_F(TRACE_syscall, syscall_dropped)
1894 ASSERT_EQ(0, ret); 1937 ASSERT_EQ(0, ret);
1895 1938
1896 /* gettid has been skipped and an altered return value stored. */ 1939 /* gettid has been skipped and an altered return value stored. */
1897 EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid)); 1940 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
1898 EXPECT_NE(self->mytid, syscall(__NR_gettid));
1899} 1941}
1900 1942
1901TEST_F(TRACE_syscall, skip_after_RET_TRACE) 1943TEST_F(TRACE_syscall, skip_after_RET_TRACE)
@@ -3044,7 +3086,7 @@ TEST(user_notification_basic)
3044 /* Check that the basic notification machinery works */ 3086 /* Check that the basic notification machinery works */
3045 listener = user_trap_syscall(__NR_getpid, 3087 listener = user_trap_syscall(__NR_getpid,
3046 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3088 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3047 EXPECT_GE(listener, 0); 3089 ASSERT_GE(listener, 0);
3048 3090
3049 /* Installing a second listener in the chain should EBUSY */ 3091 /* Installing a second listener in the chain should EBUSY */
3050 EXPECT_EQ(user_trap_syscall(__NR_getpid, 3092 EXPECT_EQ(user_trap_syscall(__NR_getpid,
@@ -3103,7 +3145,7 @@ TEST(user_notification_kill_in_middle)
3103 3145
3104 listener = user_trap_syscall(__NR_getpid, 3146 listener = user_trap_syscall(__NR_getpid,
3105 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3147 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3106 EXPECT_GE(listener, 0); 3148 ASSERT_GE(listener, 0);
3107 3149
3108 /* 3150 /*
3109 * Check that nothing bad happens when we kill the task in the middle 3151 * Check that nothing bad happens when we kill the task in the middle
@@ -3152,7 +3194,7 @@ TEST(user_notification_signal)
3152 3194
3153 listener = user_trap_syscall(__NR_gettid, 3195 listener = user_trap_syscall(__NR_gettid,
3154 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3196 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3155 EXPECT_GE(listener, 0); 3197 ASSERT_GE(listener, 0);
3156 3198
3157 pid = fork(); 3199 pid = fork();
3158 ASSERT_GE(pid, 0); 3200 ASSERT_GE(pid, 0);
@@ -3215,7 +3257,7 @@ TEST(user_notification_closed_listener)
3215 3257
3216 listener = user_trap_syscall(__NR_getpid, 3258 listener = user_trap_syscall(__NR_getpid,
3217 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3259 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3218 EXPECT_GE(listener, 0); 3260 ASSERT_GE(listener, 0);
3219 3261
3220 /* 3262 /*
3221 * Check that we get an ENOSYS when the listener is closed. 3263 * Check that we get an ENOSYS when the listener is closed.
@@ -3376,7 +3418,7 @@ TEST(seccomp_get_notif_sizes)
3376{ 3418{
3377 struct seccomp_notif_sizes sizes; 3419 struct seccomp_notif_sizes sizes;
3378 3420
3379 EXPECT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); 3421 ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
3380 EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif)); 3422 EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif));
3381 EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp)); 3423 EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp));
3382} 3424}
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 637ea0219617..0da3545cabdb 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -17,7 +17,7 @@
17 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2", 17 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
18 "expExitCode": "0", 18 "expExitCode": "0",
19 "verifyCmd": "$TC actions get action ife index 2", 19 "verifyCmd": "$TC actions get action ife index 2",
20 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2", 20 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
21 "matchCount": "1", 21 "matchCount": "1",
22 "teardown": [ 22 "teardown": [
23 "$TC actions flush action ife" 23 "$TC actions flush action ife"
@@ -41,7 +41,7 @@
41 "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2", 41 "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
42 "expExitCode": "0", 42 "expExitCode": "0",
43 "verifyCmd": "$TC actions get action ife index 2", 43 "verifyCmd": "$TC actions get action ife index 2",
44 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2", 44 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
45 "matchCount": "1", 45 "matchCount": "1",
46 "teardown": [ 46 "teardown": [
47 "$TC actions flush action ife" 47 "$TC actions flush action ife"
@@ -65,7 +65,7 @@
65 "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2", 65 "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
66 "expExitCode": "0", 66 "expExitCode": "0",
67 "verifyCmd": "$TC actions get action ife index 2", 67 "verifyCmd": "$TC actions get action ife index 2",
68 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2", 68 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
69 "matchCount": "1", 69 "matchCount": "1",
70 "teardown": [ 70 "teardown": [
71 "$TC actions flush action ife" 71 "$TC actions flush action ife"
@@ -89,7 +89,7 @@
89 "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2", 89 "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
90 "expExitCode": "0", 90 "expExitCode": "0",
91 "verifyCmd": "$TC actions get action ife index 2", 91 "verifyCmd": "$TC actions get action ife index 2",
92 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2", 92 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
93 "matchCount": "1", 93 "matchCount": "1",
94 "teardown": [ 94 "teardown": [
95 "$TC actions flush action ife" 95 "$TC actions flush action ife"
@@ -113,7 +113,7 @@
113 "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2", 113 "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
114 "expExitCode": "0", 114 "expExitCode": "0",
115 "verifyCmd": "$TC actions get action ife index 2", 115 "verifyCmd": "$TC actions get action ife index 2",
116 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2", 116 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
117 "matchCount": "1", 117 "matchCount": "1",
118 "teardown": [ 118 "teardown": [
119 "$TC actions flush action ife" 119 "$TC actions flush action ife"
@@ -137,7 +137,7 @@
137 "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2", 137 "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
138 "expExitCode": "0", 138 "expExitCode": "0",
139 "verifyCmd": "$TC actions get action ife index 2", 139 "verifyCmd": "$TC actions get action ife index 2",
140 "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2", 140 "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
141 "matchCount": "1", 141 "matchCount": "1",
142 "teardown": [ 142 "teardown": [
143 "$TC actions flush action ife" 143 "$TC actions flush action ife"
@@ -161,7 +161,7 @@
161 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90", 161 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
162 "expExitCode": "0", 162 "expExitCode": "0",
163 "verifyCmd": "$TC actions get action ife index 90", 163 "verifyCmd": "$TC actions get action ife index 90",
164 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90", 164 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
165 "matchCount": "1", 165 "matchCount": "1",
166 "teardown": [ 166 "teardown": [
167 "$TC actions flush action ife" 167 "$TC actions flush action ife"
@@ -185,7 +185,7 @@
185 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90", 185 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
186 "expExitCode": "255", 186 "expExitCode": "255",
187 "verifyCmd": "$TC actions get action ife index 90", 187 "verifyCmd": "$TC actions get action ife index 90",
188 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90", 188 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
189 "matchCount": "0", 189 "matchCount": "0",
190 "teardown": [] 190 "teardown": []
191 }, 191 },
@@ -207,7 +207,7 @@
207 "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9", 207 "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
208 "expExitCode": "0", 208 "expExitCode": "0",
209 "verifyCmd": "$TC actions get action ife index 9", 209 "verifyCmd": "$TC actions get action ife index 9",
210 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9", 210 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
211 "matchCount": "1", 211 "matchCount": "1",
212 "teardown": [ 212 "teardown": [
213 "$TC actions flush action ife" 213 "$TC actions flush action ife"
@@ -231,7 +231,7 @@
231 "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9", 231 "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
232 "expExitCode": "0", 232 "expExitCode": "0",
233 "verifyCmd": "$TC actions get action ife index 9", 233 "verifyCmd": "$TC actions get action ife index 9",
234 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9", 234 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
235 "matchCount": "1", 235 "matchCount": "1",
236 "teardown": [ 236 "teardown": [
237 "$TC actions flush action ife" 237 "$TC actions flush action ife"
@@ -255,7 +255,7 @@
255 "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9", 255 "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
256 "expExitCode": "0", 256 "expExitCode": "0",
257 "verifyCmd": "$TC actions get action ife index 9", 257 "verifyCmd": "$TC actions get action ife index 9",
258 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9", 258 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
259 "matchCount": "1", 259 "matchCount": "1",
260 "teardown": [ 260 "teardown": [
261 "$TC actions flush action ife" 261 "$TC actions flush action ife"
@@ -279,7 +279,7 @@
279 "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9", 279 "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
280 "expExitCode": "0", 280 "expExitCode": "0",
281 "verifyCmd": "$TC actions get action ife index 9", 281 "verifyCmd": "$TC actions get action ife index 9",
282 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9", 282 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
283 "matchCount": "1", 283 "matchCount": "1",
284 "teardown": [ 284 "teardown": [
285 "$TC actions flush action ife" 285 "$TC actions flush action ife"
@@ -303,7 +303,7 @@
303 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9", 303 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
304 "expExitCode": "0", 304 "expExitCode": "0",
305 "verifyCmd": "$TC actions get action ife index 9", 305 "verifyCmd": "$TC actions get action ife index 9",
306 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9", 306 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
307 "matchCount": "1", 307 "matchCount": "1",
308 "teardown": [ 308 "teardown": [
309 "$TC actions flush action ife" 309 "$TC actions flush action ife"
@@ -327,7 +327,7 @@
327 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9", 327 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
328 "expExitCode": "0", 328 "expExitCode": "0",
329 "verifyCmd": "$TC actions get action ife index 9", 329 "verifyCmd": "$TC actions get action ife index 9",
330 "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9", 330 "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
331 "matchCount": "1", 331 "matchCount": "1",
332 "teardown": [ 332 "teardown": [
333 "$TC actions flush action ife" 333 "$TC actions flush action ife"
@@ -351,7 +351,7 @@
351 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99", 351 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
352 "expExitCode": "0", 352 "expExitCode": "0",
353 "verifyCmd": "$TC actions get action ife index 99", 353 "verifyCmd": "$TC actions get action ife index 99",
354 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99", 354 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
355 "matchCount": "1", 355 "matchCount": "1",
356 "teardown": [ 356 "teardown": [
357 "$TC actions flush action ife" 357 "$TC actions flush action ife"
@@ -375,7 +375,7 @@
375 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99", 375 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
376 "expExitCode": "255", 376 "expExitCode": "255",
377 "verifyCmd": "$TC actions get action ife index 99", 377 "verifyCmd": "$TC actions get action ife index 99",
378 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99", 378 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
379 "matchCount": "0", 379 "matchCount": "0",
380 "teardown": [] 380 "teardown": []
381 }, 381 },
@@ -397,7 +397,7 @@
397 "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1", 397 "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
398 "expExitCode": "0", 398 "expExitCode": "0",
399 "verifyCmd": "$TC actions get action ife index 1", 399 "verifyCmd": "$TC actions get action ife index 1",
400 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1", 400 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
401 "matchCount": "1", 401 "matchCount": "1",
402 "teardown": [ 402 "teardown": [
403 "$TC actions flush action ife" 403 "$TC actions flush action ife"
@@ -421,7 +421,7 @@
421 "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1", 421 "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
422 "expExitCode": "0", 422 "expExitCode": "0",
423 "verifyCmd": "$TC actions get action ife index 1", 423 "verifyCmd": "$TC actions get action ife index 1",
424 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1", 424 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
425 "matchCount": "1", 425 "matchCount": "1",
426 "teardown": [ 426 "teardown": [
427 "$TC actions flush action ife" 427 "$TC actions flush action ife"
@@ -445,7 +445,7 @@
445 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", 445 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
446 "expExitCode": "0", 446 "expExitCode": "0",
447 "verifyCmd": "$TC actions get action ife index 1", 447 "verifyCmd": "$TC actions get action ife index 1",
448 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", 448 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
449 "matchCount": "1", 449 "matchCount": "1",
450 "teardown": [ 450 "teardown": [
451 "$TC actions flush action ife" 451 "$TC actions flush action ife"
@@ -469,7 +469,7 @@
469 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", 469 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
470 "expExitCode": "0", 470 "expExitCode": "0",
471 "verifyCmd": "$TC actions get action ife index 1", 471 "verifyCmd": "$TC actions get action ife index 1",
472 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", 472 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
473 "matchCount": "1", 473 "matchCount": "1",
474 "teardown": [ 474 "teardown": [
475 "$TC actions flush action ife" 475 "$TC actions flush action ife"
@@ -493,7 +493,7 @@
493 "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77", 493 "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
494 "expExitCode": "0", 494 "expExitCode": "0",
495 "verifyCmd": "$TC actions get action ife index 77", 495 "verifyCmd": "$TC actions get action ife index 77",
496 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77", 496 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
497 "matchCount": "1", 497 "matchCount": "1",
498 "teardown": [ 498 "teardown": [
499 "$TC actions flush action ife" 499 "$TC actions flush action ife"
@@ -517,7 +517,7 @@
517 "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77", 517 "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
518 "expExitCode": "0", 518 "expExitCode": "0",
519 "verifyCmd": "$TC actions get action ife index 77", 519 "verifyCmd": "$TC actions get action ife index 77",
520 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77", 520 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
521 "matchCount": "1", 521 "matchCount": "1",
522 "teardown": [ 522 "teardown": [
523 "$TC actions flush action ife" 523 "$TC actions flush action ife"
@@ -541,7 +541,7 @@
541 "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77", 541 "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
542 "expExitCode": "0", 542 "expExitCode": "0",
543 "verifyCmd": "$TC actions get action ife index 77", 543 "verifyCmd": "$TC actions get action ife index 77",
544 "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77", 544 "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
545 "matchCount": "1", 545 "matchCount": "1",
546 "teardown": [ 546 "teardown": [
547 "$TC actions flush action ife" 547 "$TC actions flush action ife"
@@ -565,7 +565,7 @@
565 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1", 565 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
566 "expExitCode": "0", 566 "expExitCode": "0",
567 "verifyCmd": "$TC actions get action ife index 1", 567 "verifyCmd": "$TC actions get action ife index 1",
568 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1", 568 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
569 "matchCount": "1", 569 "matchCount": "1",
570 "teardown": [ 570 "teardown": [
571 "$TC actions flush action ife" 571 "$TC actions flush action ife"
@@ -589,7 +589,7 @@
589 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1", 589 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
590 "expExitCode": "255", 590 "expExitCode": "255",
591 "verifyCmd": "$TC actions get action ife index 1", 591 "verifyCmd": "$TC actions get action ife index 1",
592 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1", 592 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
593 "matchCount": "0", 593 "matchCount": "0",
594 "teardown": [] 594 "teardown": []
595 }, 595 },
@@ -611,7 +611,7 @@
611 "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1", 611 "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
612 "expExitCode": "0", 612 "expExitCode": "0",
613 "verifyCmd": "$TC actions get action ife index 1", 613 "verifyCmd": "$TC actions get action ife index 1",
614 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1", 614 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
615 "matchCount": "1", 615 "matchCount": "1",
616 "teardown": [ 616 "teardown": [
617 "$TC actions flush action ife" 617 "$TC actions flush action ife"
@@ -635,7 +635,7 @@
635 "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1", 635 "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
636 "expExitCode": "0", 636 "expExitCode": "0",
637 "verifyCmd": "$TC actions get action ife index 1", 637 "verifyCmd": "$TC actions get action ife index 1",
638 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", 638 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
639 "matchCount": "1", 639 "matchCount": "1",
640 "teardown": [ 640 "teardown": [
641 "$TC actions flush action ife" 641 "$TC actions flush action ife"
@@ -659,7 +659,7 @@
659 "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11", 659 "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
660 "expExitCode": "0", 660 "expExitCode": "0",
661 "verifyCmd": "$TC actions get action ife index 11", 661 "verifyCmd": "$TC actions get action ife index 11",
662 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", 662 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
663 "matchCount": "1", 663 "matchCount": "1",
664 "teardown": [ 664 "teardown": [
665 "$TC actions flush action ife" 665 "$TC actions flush action ife"
@@ -683,7 +683,7 @@
683 "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1", 683 "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
684 "expExitCode": "0", 684 "expExitCode": "0",
685 "verifyCmd": "$TC actions get action ife index 1", 685 "verifyCmd": "$TC actions get action ife index 1",
686 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1", 686 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
687 "matchCount": "1", 687 "matchCount": "1",
688 "teardown": [ 688 "teardown": [
689 "$TC actions flush action ife" 689 "$TC actions flush action ife"
@@ -707,7 +707,7 @@
707 "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21", 707 "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
708 "expExitCode": "0", 708 "expExitCode": "0",
709 "verifyCmd": "$TC actions get action ife index 21", 709 "verifyCmd": "$TC actions get action ife index 21",
710 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21", 710 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
711 "matchCount": "1", 711 "matchCount": "1",
712 "teardown": [ 712 "teardown": [
713 "$TC actions flush action ife" 713 "$TC actions flush action ife"
@@ -731,7 +731,7 @@
731 "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21", 731 "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
732 "expExitCode": "0", 732 "expExitCode": "0",
733 "verifyCmd": "$TC actions get action ife index 21", 733 "verifyCmd": "$TC actions get action ife index 21",
734 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21", 734 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
735 "matchCount": "1", 735 "matchCount": "1",
736 "teardown": [ 736 "teardown": [
737 "$TC actions flush action ife" 737 "$TC actions flush action ife"
@@ -739,7 +739,7 @@
739 }, 739 },
740 { 740 {
741 "id": "fac3", 741 "id": "fac3",
742 "name": "Create valid ife encode action with index at 32-bit maximnum", 742 "name": "Create valid ife encode action with index at 32-bit maximum",
743 "category": [ 743 "category": [
744 "actions", 744 "actions",
745 "ife" 745 "ife"
@@ -755,7 +755,7 @@
755 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295", 755 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
756 "expExitCode": "0", 756 "expExitCode": "0",
757 "verifyCmd": "$TC actions get action ife index 4294967295", 757 "verifyCmd": "$TC actions get action ife index 4294967295",
758 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295", 758 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
759 "matchCount": "1", 759 "matchCount": "1",
760 "teardown": [ 760 "teardown": [
761 "$TC actions flush action ife" 761 "$TC actions flush action ife"
@@ -779,7 +779,7 @@
779 "cmdUnderTest": "$TC actions add action ife decode pass index 1", 779 "cmdUnderTest": "$TC actions add action ife decode pass index 1",
780 "expExitCode": "0", 780 "expExitCode": "0",
781 "verifyCmd": "$TC actions get action ife index 1", 781 "verifyCmd": "$TC actions get action ife index 1",
782 "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 782 "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
783 "matchCount": "1", 783 "matchCount": "1",
784 "teardown": [ 784 "teardown": [
785 "$TC actions flush action ife" 785 "$TC actions flush action ife"
@@ -803,7 +803,7 @@
803 "cmdUnderTest": "$TC actions add action ife decode pipe index 1", 803 "cmdUnderTest": "$TC actions add action ife decode pipe index 1",
804 "expExitCode": "0", 804 "expExitCode": "0",
805 "verifyCmd": "$TC actions get action ife index 1", 805 "verifyCmd": "$TC actions get action ife index 1",
806 "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 806 "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
807 "matchCount": "1", 807 "matchCount": "1",
808 "teardown": [ 808 "teardown": [
809 "$TC actions flush action ife" 809 "$TC actions flush action ife"
@@ -827,7 +827,7 @@
827 "cmdUnderTest": "$TC actions add action ife decode continue index 1", 827 "cmdUnderTest": "$TC actions add action ife decode continue index 1",
828 "expExitCode": "0", 828 "expExitCode": "0",
829 "verifyCmd": "$TC actions get action ife index 1", 829 "verifyCmd": "$TC actions get action ife index 1",
830 "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 830 "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
831 "matchCount": "1", 831 "matchCount": "1",
832 "teardown": [ 832 "teardown": [
833 "$TC actions flush action ife" 833 "$TC actions flush action ife"
@@ -851,7 +851,7 @@
851 "cmdUnderTest": "$TC actions add action ife decode drop index 1", 851 "cmdUnderTest": "$TC actions add action ife decode drop index 1",
852 "expExitCode": "0", 852 "expExitCode": "0",
853 "verifyCmd": "$TC actions get action ife index 1", 853 "verifyCmd": "$TC actions get action ife index 1",
854 "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 854 "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
855 "matchCount": "1", 855 "matchCount": "1",
856 "teardown": [ 856 "teardown": [
857 "$TC actions flush action ife" 857 "$TC actions flush action ife"
@@ -875,7 +875,7 @@
875 "cmdUnderTest": "$TC actions add action ife decode reclassify index 1", 875 "cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
876 "expExitCode": "0", 876 "expExitCode": "0",
877 "verifyCmd": "$TC actions get action ife index 1", 877 "verifyCmd": "$TC actions get action ife index 1",
878 "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 878 "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
879 "matchCount": "1", 879 "matchCount": "1",
880 "teardown": [ 880 "teardown": [
881 "$TC actions flush action ife" 881 "$TC actions flush action ife"
@@ -899,7 +899,7 @@
899 "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1", 899 "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
900 "expExitCode": "0", 900 "expExitCode": "0",
901 "verifyCmd": "$TC actions get action ife index 1", 901 "verifyCmd": "$TC actions get action ife index 1",
902 "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 902 "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
903 "matchCount": "1", 903 "matchCount": "1",
904 "teardown": [ 904 "teardown": [
905 "$TC actions flush action ife" 905 "$TC actions flush action ife"
@@ -923,7 +923,7 @@
923 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999", 923 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
924 "expExitCode": "255", 924 "expExitCode": "255",
925 "verifyCmd": "$TC actions get action ife index 4294967295999", 925 "verifyCmd": "$TC actions get action ife index 4294967295999",
926 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999", 926 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
927 "matchCount": "0", 927 "matchCount": "0",
928 "teardown": [] 928 "teardown": []
929 }, 929 },
@@ -945,7 +945,7 @@
945 "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4", 945 "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
946 "expExitCode": "255", 946 "expExitCode": "255",
947 "verifyCmd": "$TC actions get action ife index 4", 947 "verifyCmd": "$TC actions get action ife index 4",
948 "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4", 948 "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
949 "matchCount": "0", 949 "matchCount": "0",
950 "teardown": [] 950 "teardown": []
951 }, 951 },
@@ -967,7 +967,7 @@
967 "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1", 967 "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
968 "expExitCode": "0", 968 "expExitCode": "0",
969 "verifyCmd": "$TC actions get action ife index 4", 969 "verifyCmd": "$TC actions get action ife index 4",
970 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", 970 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
971 "matchCount": "1", 971 "matchCount": "1",
972 "teardown": [ 972 "teardown": [
973 "$TC actions flush action ife" 973 "$TC actions flush action ife"
@@ -991,7 +991,7 @@
991 "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4", 991 "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
992 "expExitCode": "255", 992 "expExitCode": "255",
993 "verifyCmd": "$TC actions get action ife index 4", 993 "verifyCmd": "$TC actions get action ife index 4",
994 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4", 994 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
995 "matchCount": "0", 995 "matchCount": "0",
996 "teardown": [] 996 "teardown": []
997 }, 997 },
@@ -1013,7 +1013,7 @@
1013 "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4", 1013 "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
1014 "expExitCode": "255", 1014 "expExitCode": "255",
1015 "verifyCmd": "$TC actions get action ife index 4", 1015 "verifyCmd": "$TC actions get action ife index 4",
1016 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4", 1016 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
1017 "matchCount": "0", 1017 "matchCount": "0",
1018 "teardown": [] 1018 "teardown": []
1019 }, 1019 },
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index 10b2d894e436..e7e15a7336b6 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -82,35 +82,6 @@
82 ] 82 ]
83 }, 83 },
84 { 84 {
85 "id": "ba4e",
86 "name": "Add tunnel_key set action with missing mandatory id parameter",
87 "category": [
88 "actions",
89 "tunnel_key"
90 ],
91 "setup": [
92 [
93 "$TC actions flush action tunnel_key",
94 0,
95 1,
96 255
97 ]
98 ],
99 "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
100 "expExitCode": "255",
101 "verifyCmd": "$TC actions list action tunnel_key",
102 "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
103 "matchCount": "0",
104 "teardown": [
105 [
106 "$TC actions flush action tunnel_key",
107 0,
108 1,
109 255
110 ]
111 ]
112 },
113 {
114 "id": "a5e0", 85 "id": "a5e0",
115 "name": "Add tunnel_key set action with invalid src_ip parameter", 86 "name": "Add tunnel_key set action with invalid src_ip parameter",
116 "category": [ 87 "category": [
@@ -634,7 +605,7 @@
634 "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", 605 "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
635 "expExitCode": "0", 606 "expExitCode": "0",
636 "verifyCmd": "$TC actions get action tunnel_key index 4", 607 "verifyCmd": "$TC actions get action tunnel_key index 4",
637 "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", 608 "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
638 "matchCount": "1", 609 "matchCount": "1",
639 "teardown": [ 610 "teardown": [
640 "$TC actions flush action tunnel_key" 611 "$TC actions flush action tunnel_key"
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index c02683cfb6c9..7656c7ce79d9 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2CFLAGS += -O3 -Wl,-no-as-needed -Wall 2CFLAGS += -O3 -Wl,-no-as-needed -Wall
3LDFLAGS += -lrt -lpthread -lm 3LDLIBS += -lrt -lpthread -lm
4 4
5# these are all "safe" tests that don't modify 5# these are all "safe" tests that don't modify
6# system time or require escalated privileges 6# system time or require escalated privileges
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index 880b96fc80d4..c0534e298b51 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -25,6 +25,7 @@ struct gup_benchmark {
25 __u64 size; 25 __u64 size;
26 __u32 nr_pages_per_call; 26 __u32 nr_pages_per_call;
27 __u32 flags; 27 __u32 flags;
28 __u64 expansion[10]; /* For future use */
28}; 29};
29 30
30int main(int argc, char **argv) 31int main(int argc, char **argv)
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
index 50f7e9272481..bf1bb15b6fbe 100644
--- a/tools/testing/selftests/x86/mpx-mini-test.c
+++ b/tools/testing/selftests/x86/mpx-mini-test.c
@@ -1503,7 +1503,7 @@ exit:
1503 exit(20); 1503 exit(20);
1504 } 1504 }
1505 if (successes != total_nr_tests) { 1505 if (successes != total_nr_tests) {
1506 eprintf("ERROR: succeded fewer than number of tries (%d != %d)\n", 1506 eprintf("ERROR: succeeded fewer than number of tries (%d != %d)\n",
1507 successes, total_nr_tests); 1507 successes, total_nr_tests);
1508 exit(21); 1508 exit(21);
1509 } 1509 }
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 460b4bdf4c1e..5d546dcdbc80 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
1133 pkey_assert(err); 1133 pkey_assert(err);
1134} 1134}
1135 1135
1136void become_child(void)
1137{
1138 pid_t forkret;
1139
1140 forkret = fork();
1141 pkey_assert(forkret >= 0);
1142 dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
1143
1144 if (!forkret) {
1145 /* in the child */
1146 return;
1147 }
1148 exit(0);
1149}
1150
1136/* Assumes that all pkeys other than 'pkey' are unallocated */ 1151/* Assumes that all pkeys other than 'pkey' are unallocated */
1137void test_pkey_alloc_exhaust(int *ptr, u16 pkey) 1152void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1138{ 1153{
@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1141 int nr_allocated_pkeys = 0; 1156 int nr_allocated_pkeys = 0;
1142 int i; 1157 int i;
1143 1158
1144 for (i = 0; i < NR_PKEYS*2; i++) { 1159 for (i = 0; i < NR_PKEYS*3; i++) {
1145 int new_pkey; 1160 int new_pkey;
1146 dprintf1("%s() alloc loop: %d\n", __func__, i); 1161 dprintf1("%s() alloc loop: %d\n", __func__, i);
1147 new_pkey = alloc_pkey(); 1162 new_pkey = alloc_pkey();
@@ -1152,21 +1167,27 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1152 if ((new_pkey == -1) && (errno == ENOSPC)) { 1167 if ((new_pkey == -1) && (errno == ENOSPC)) {
1153 dprintf2("%s() failed to allocate pkey after %d tries\n", 1168 dprintf2("%s() failed to allocate pkey after %d tries\n",
1154 __func__, nr_allocated_pkeys); 1169 __func__, nr_allocated_pkeys);
1155 break; 1170 } else {
1171 /*
1172 * Ensure the number of successes never
1173 * exceeds the number of keys supported
1174 * in the hardware.
1175 */
1176 pkey_assert(nr_allocated_pkeys < NR_PKEYS);
1177 allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
1156 } 1178 }
1157 pkey_assert(nr_allocated_pkeys < NR_PKEYS); 1179
1158 allocated_pkeys[nr_allocated_pkeys++] = new_pkey; 1180 /*
1181 * Make sure that allocation state is properly
1182 * preserved across fork().
1183 */
1184 if (i == NR_PKEYS*2)
1185 become_child();
1159 } 1186 }
1160 1187
1161 dprintf3("%s()::%d\n", __func__, __LINE__); 1188 dprintf3("%s()::%d\n", __func__, __LINE__);
1162 1189
1163 /* 1190 /*
1164 * ensure it did not reach the end of the loop without
1165 * failure:
1166 */
1167 pkey_assert(i < NR_PKEYS*2);
1168
1169 /*
1170 * There are 16 pkeys supported in hardware. Three are 1191 * There are 16 pkeys supported in hardware. Three are
1171 * allocated by the time we get here: 1192 * allocated by the time we get here:
1172 * 1. The default key (0) 1193 * 1. The default key (0)
diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c
index 00a26a82fa98..97311333700e 100644
--- a/tools/testing/selftests/x86/unwind_vdso.c
+++ b/tools/testing/selftests/x86/unwind_vdso.c
@@ -44,7 +44,6 @@ int main()
44#include <stdbool.h> 44#include <stdbool.h>
45#include <sys/ptrace.h> 45#include <sys/ptrace.h>
46#include <sys/user.h> 46#include <sys/user.h>
47#include <sys/ucontext.h>
48#include <link.h> 47#include <link.h>
49#include <sys/auxv.h> 48#include <sys/auxv.h>
50#include <dlfcn.h> 49#include <dlfcn.h>
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 89a2444c1df2..59e417ec3e13 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -6,7 +6,7 @@ VERSION = 1.0
6 6
7BINDIR=usr/bin 7BINDIR=usr/bin
8WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int 8WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
9override CFLAGS+= -O1 ${WARNFLAGS} 9override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS}
10# Add "-fstack-protector" only if toolchain supports it. 10# Add "-fstack-protector" only if toolchain supports it.
11override CFLAGS+= $(call cc-option,-fstack-protector-strong) 11override CFLAGS+= $(call cc-option,-fstack-protector-strong)
12CC?= $(CROSS_COMPILE)gcc 12CC?= $(CROSS_COMPILE)gcc
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index 18fc112b65cd..d3a8755c039c 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -5,7 +5,9 @@
5 * Example use: 5 * Example use:
6 * cat /sys/kernel/debug/page_owner > page_owner_full.txt 6 * cat /sys/kernel/debug/page_owner > page_owner_full.txt
7 * grep -v ^PFN page_owner_full.txt > page_owner.txt 7 * grep -v ^PFN page_owner_full.txt > page_owner.txt
8 * ./sort page_owner.txt sorted_page_owner.txt 8 * ./page_owner_sort page_owner.txt sorted_page_owner.txt
9 *
10 * See Documentation/vm/page_owner.rst
9*/ 11*/
10 12
11#include <stdio.h> 13#include <stdio.h>
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9e350fd34504..9c486fad3f9f 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
626 /* Awaken to handle a signal, request we sleep again later. */ 626 /* Awaken to handle a signal, request we sleep again later. */
627 kvm_make_request(KVM_REQ_SLEEP, vcpu); 627 kvm_make_request(KVM_REQ_SLEEP, vcpu);
628 } 628 }
629
630 /*
631 * Make sure we will observe a potential reset request if we've
632 * observed a change to the power state. Pairs with the smp_wmb() in
633 * kvm_psci_vcpu_on().
634 */
635 smp_rmb();
629} 636}
630 637
631static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) 638static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
639 if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) 646 if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
640 vcpu_req_sleep(vcpu); 647 vcpu_req_sleep(vcpu);
641 648
649 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
650 kvm_reset_vcpu(vcpu);
651
642 /* 652 /*
643 * Clear IRQ_PENDING requests that were made to guarantee 653 * Clear IRQ_PENDING requests that were made to guarantee
644 * that a VCPU sees new virtual interrupts. 654 * that a VCPU sees new virtual interrupts.
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..30251e288629 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1695 1695
1696 vma_pagesize = vma_kernel_pagesize(vma); 1696 vma_pagesize = vma_kernel_pagesize(vma);
1697 /* 1697 /*
1698 * PUD level may not exist for a VM but PMD is guaranteed to 1698 * The stage2 has a minimum of 2 level table (For arm64 see
1699 * exist. 1699 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1700 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
1701 * As for PUD huge maps, we must make sure that we have at least
1702 * 3 levels, i.e, PMD is not folded.
1700 */ 1703 */
1701 if ((vma_pagesize == PMD_SIZE || 1704 if ((vma_pagesize == PMD_SIZE ||
1702 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) && 1705 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
1703 !force_pte) { 1706 !force_pte) {
1704 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; 1707 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1705 } 1708 }
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 9b73d3ad918a..34d08ee63747 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
104 104
105static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) 105static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
106{ 106{
107 struct vcpu_reset_state *reset_state;
107 struct kvm *kvm = source_vcpu->kvm; 108 struct kvm *kvm = source_vcpu->kvm;
108 struct kvm_vcpu *vcpu = NULL; 109 struct kvm_vcpu *vcpu = NULL;
109 struct swait_queue_head *wq;
110 unsigned long cpu_id; 110 unsigned long cpu_id;
111 unsigned long context_id;
112 phys_addr_t target_pc;
113 111
114 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; 112 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
115 if (vcpu_mode_is_32bit(source_vcpu)) 113 if (vcpu_mode_is_32bit(source_vcpu))
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
130 return PSCI_RET_INVALID_PARAMS; 128 return PSCI_RET_INVALID_PARAMS;
131 } 129 }
132 130
133 target_pc = smccc_get_arg2(source_vcpu); 131 reset_state = &vcpu->arch.reset_state;
134 context_id = smccc_get_arg3(source_vcpu);
135 132
136 kvm_reset_vcpu(vcpu); 133 reset_state->pc = smccc_get_arg2(source_vcpu);
137
138 /* Gracefully handle Thumb2 entry point */
139 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
140 target_pc &= ~((phys_addr_t) 1);
141 vcpu_set_thumb(vcpu);
142 }
143 134
144 /* Propagate caller endianness */ 135 /* Propagate caller endianness */
145 if (kvm_vcpu_is_be(source_vcpu)) 136 reset_state->be = kvm_vcpu_is_be(source_vcpu);
146 kvm_vcpu_set_be(vcpu);
147 137
148 *vcpu_pc(vcpu) = target_pc;
149 /* 138 /*
150 * NOTE: We always update r0 (or x0) because for PSCI v0.1 139 * NOTE: We always update r0 (or x0) because for PSCI v0.1
151 * the general puspose registers are undefined upon CPU_ON. 140 * the general puspose registers are undefined upon CPU_ON.
152 */ 141 */
153 smccc_set_retval(vcpu, context_id, 0, 0, 0); 142 reset_state->r0 = smccc_get_arg3(source_vcpu);
154 vcpu->arch.power_off = false; 143
155 smp_mb(); /* Make sure the above is visible */ 144 WRITE_ONCE(reset_state->reset, true);
145 kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
156 146
157 wq = kvm_arch_vcpu_wq(vcpu); 147 /*
158 swake_up_one(wq); 148 * Make sure the reset request is observed if the change to
149 * power_state is observed.
150 */
151 smp_wmb();
152
153 vcpu->arch.power_off = false;
154 kvm_vcpu_wake_up(vcpu);
159 155
160 return PSCI_RET_SUCCESS; 156 return PSCI_RET_SUCCESS;
161} 157}
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 07aa900bac56..1f62f2b8065d 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
251 return 0; 251 return 0;
252 } 252 }
253 253
254 spin_lock_irqsave(&irq->irq_lock, flags); 254 raw_spin_lock_irqsave(&irq->irq_lock, flags);
255 print_irq_state(s, irq, vcpu); 255 print_irq_state(s, irq, vcpu);
256 spin_unlock_irqrestore(&irq->irq_lock, flags); 256 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
257 257
258 vgic_put_irq(kvm, irq); 258 vgic_put_irq(kvm, irq);
259 return 0; 259 return 0;
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index c0c0b88af1d5..3bdb31eaed64 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
64 struct vgic_dist *dist = &kvm->arch.vgic; 64 struct vgic_dist *dist = &kvm->arch.vgic;
65 65
66 INIT_LIST_HEAD(&dist->lpi_list_head); 66 INIT_LIST_HEAD(&dist->lpi_list_head);
67 spin_lock_init(&dist->lpi_list_lock); 67 raw_spin_lock_init(&dist->lpi_list_lock);
68} 68}
69 69
70/* CREATION */ 70/* CREATION */
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
171 171
172 irq->intid = i + VGIC_NR_PRIVATE_IRQS; 172 irq->intid = i + VGIC_NR_PRIVATE_IRQS;
173 INIT_LIST_HEAD(&irq->ap_list); 173 INIT_LIST_HEAD(&irq->ap_list);
174 spin_lock_init(&irq->irq_lock); 174 raw_spin_lock_init(&irq->irq_lock);
175 irq->vcpu = NULL; 175 irq->vcpu = NULL;
176 irq->target_vcpu = vcpu0; 176 irq->target_vcpu = vcpu0;
177 kref_init(&irq->refcount); 177 kref_init(&irq->refcount);
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
206 vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; 206 vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
207 207
208 INIT_LIST_HEAD(&vgic_cpu->ap_list_head); 208 INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
209 spin_lock_init(&vgic_cpu->ap_list_lock); 209 raw_spin_lock_init(&vgic_cpu->ap_list_lock);
210 210
211 /* 211 /*
212 * Enable and configure all SGIs to be edge-triggered and 212 * Enable and configure all SGIs to be edge-triggered and
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
216 struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; 216 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
217 217
218 INIT_LIST_HEAD(&irq->ap_list); 218 INIT_LIST_HEAD(&irq->ap_list);
219 spin_lock_init(&irq->irq_lock); 219 raw_spin_lock_init(&irq->irq_lock);
220 irq->intid = i; 220 irq->intid = i;
221 irq->vcpu = NULL; 221 irq->vcpu = NULL;
222 irq->target_vcpu = vcpu; 222 irq->target_vcpu = vcpu;
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
231 irq->config = VGIC_CONFIG_LEVEL; 231 irq->config = VGIC_CONFIG_LEVEL;
232 } 232 }
233 233
234 /*
235 * GICv3 can only be created via the KVM_DEVICE_CREATE API and
236 * so we always know the emulation type at this point as it's
237 * either explicitly configured as GICv3, or explicitly
238 * configured as GICv2, or not configured yet which also
239 * implies GICv2.
240 */
241 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) 234 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
242 irq->group = 1; 235 irq->group = 1;
243 else 236 else
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
281{ 274{
282 struct vgic_dist *dist = &kvm->arch.vgic; 275 struct vgic_dist *dist = &kvm->arch.vgic;
283 struct kvm_vcpu *vcpu; 276 struct kvm_vcpu *vcpu;
284 int ret = 0, i; 277 int ret = 0, i, idx;
285 278
286 if (vgic_initialized(kvm)) 279 if (vgic_initialized(kvm))
287 return 0; 280 return 0;
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
298 if (ret) 291 if (ret)
299 goto out; 292 goto out;
300 293
294 /* Initialize groups on CPUs created before the VGIC type was known */
295 kvm_for_each_vcpu(idx, vcpu, kvm) {
296 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
297
298 for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
299 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
300 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
301 irq->group = 1;
302 else
303 irq->group = 0;
304 }
305 }
306
301 if (vgic_has_its(kvm)) { 307 if (vgic_has_its(kvm)) {
302 ret = vgic_v4_init(kvm); 308 ret = vgic_v4_init(kvm);
303 if (ret) 309 if (ret)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index eb2a390a6c86..ab3f47745d9c 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
65 65
66 INIT_LIST_HEAD(&irq->lpi_list); 66 INIT_LIST_HEAD(&irq->lpi_list);
67 INIT_LIST_HEAD(&irq->ap_list); 67 INIT_LIST_HEAD(&irq->ap_list);
68 spin_lock_init(&irq->irq_lock); 68 raw_spin_lock_init(&irq->irq_lock);
69 69
70 irq->config = VGIC_CONFIG_EDGE; 70 irq->config = VGIC_CONFIG_EDGE;
71 kref_init(&irq->refcount); 71 kref_init(&irq->refcount);
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
73 irq->target_vcpu = vcpu; 73 irq->target_vcpu = vcpu;
74 irq->group = 1; 74 irq->group = 1;
75 75
76 spin_lock_irqsave(&dist->lpi_list_lock, flags); 76 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
77 77
78 /* 78 /*
79 * There could be a race with another vgic_add_lpi(), so we need to 79 * There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
101 dist->lpi_list_count++; 101 dist->lpi_list_count++;
102 102
103out_unlock: 103out_unlock:
104 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 104 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
105 105
106 /* 106 /*
107 * We "cache" the configuration table entries in our struct vgic_irq's. 107 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
287 if (ret) 287 if (ret)
288 return ret; 288 return ret;
289 289
290 spin_lock_irqsave(&irq->irq_lock, flags); 290 raw_spin_lock_irqsave(&irq->irq_lock, flags);
291 291
292 if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { 292 if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
293 irq->priority = LPI_PROP_PRIORITY(prop); 293 irq->priority = LPI_PROP_PRIORITY(prop);
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
299 } 299 }
300 } 300 }
301 301
302 spin_unlock_irqrestore(&irq->irq_lock, flags); 302 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
303 303
304 if (irq->hw) 304 if (irq->hw)
305 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); 305 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
332 if (!intids) 332 if (!intids)
333 return -ENOMEM; 333 return -ENOMEM;
334 334
335 spin_lock_irqsave(&dist->lpi_list_lock, flags); 335 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
337 if (i == irq_count) 337 if (i == irq_count)
338 break; 338 break;
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
341 continue; 341 continue;
342 intids[i++] = irq->intid; 342 intids[i++] = irq->intid;
343 } 343 }
344 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 344 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
345 345
346 *intid_ptr = intids; 346 *intid_ptr = intids;
347 return i; 347 return i;
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
352 int ret = 0; 352 int ret = 0;
353 unsigned long flags; 353 unsigned long flags;
354 354
355 spin_lock_irqsave(&irq->irq_lock, flags); 355 raw_spin_lock_irqsave(&irq->irq_lock, flags);
356 irq->target_vcpu = vcpu; 356 irq->target_vcpu = vcpu;
357 spin_unlock_irqrestore(&irq->irq_lock, flags); 357 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
358 358
359 if (irq->hw) { 359 if (irq->hw) {
360 struct its_vlpi_map map; 360 struct its_vlpi_map map;
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
455 } 455 }
456 456
457 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); 457 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
458 spin_lock_irqsave(&irq->irq_lock, flags); 458 raw_spin_lock_irqsave(&irq->irq_lock, flags);
459 irq->pending_latch = pendmask & (1U << bit_nr); 459 irq->pending_latch = pendmask & (1U << bit_nr);
460 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 460 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
461 vgic_put_irq(vcpu->kvm, irq); 461 vgic_put_irq(vcpu->kvm, irq);
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
612 return irq_set_irqchip_state(irq->host_irq, 612 return irq_set_irqchip_state(irq->host_irq,
613 IRQCHIP_STATE_PENDING, true); 613 IRQCHIP_STATE_PENDING, true);
614 614
615 spin_lock_irqsave(&irq->irq_lock, flags); 615 raw_spin_lock_irqsave(&irq->irq_lock, flags);
616 irq->pending_latch = true; 616 irq->pending_latch = true;
617 vgic_queue_irq_unlock(kvm, irq, flags); 617 vgic_queue_irq_unlock(kvm, irq, flags);
618 618
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 738b65d2d0e7..b535fffc7400 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
147 147
148 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); 148 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
149 149
150 spin_lock_irqsave(&irq->irq_lock, flags); 150 raw_spin_lock_irqsave(&irq->irq_lock, flags);
151 irq->pending_latch = true; 151 irq->pending_latch = true;
152 irq->source |= 1U << source_vcpu->vcpu_id; 152 irq->source |= 1U << source_vcpu->vcpu_id;
153 153
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
191 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); 191 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
192 int target; 192 int target;
193 193
194 spin_lock_irqsave(&irq->irq_lock, flags); 194 raw_spin_lock_irqsave(&irq->irq_lock, flags);
195 195
196 irq->targets = (val >> (i * 8)) & cpu_mask; 196 irq->targets = (val >> (i * 8)) & cpu_mask;
197 target = irq->targets ? __ffs(irq->targets) : 0; 197 target = irq->targets ? __ffs(irq->targets) : 0;
198 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); 198 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
199 199
200 spin_unlock_irqrestore(&irq->irq_lock, flags); 200 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
201 vgic_put_irq(vcpu->kvm, irq); 201 vgic_put_irq(vcpu->kvm, irq);
202 } 202 }
203} 203}
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
230 for (i = 0; i < len; i++) { 230 for (i = 0; i < len; i++) {
231 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 231 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
232 232
233 spin_lock_irqsave(&irq->irq_lock, flags); 233 raw_spin_lock_irqsave(&irq->irq_lock, flags);
234 234
235 irq->source &= ~((val >> (i * 8)) & 0xff); 235 irq->source &= ~((val >> (i * 8)) & 0xff);
236 if (!irq->source) 236 if (!irq->source)
237 irq->pending_latch = false; 237 irq->pending_latch = false;
238 238
239 spin_unlock_irqrestore(&irq->irq_lock, flags); 239 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
240 vgic_put_irq(vcpu->kvm, irq); 240 vgic_put_irq(vcpu->kvm, irq);
241 } 241 }
242} 242}
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
252 for (i = 0; i < len; i++) { 252 for (i = 0; i < len; i++) {
253 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 253 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
254 254
255 spin_lock_irqsave(&irq->irq_lock, flags); 255 raw_spin_lock_irqsave(&irq->irq_lock, flags);
256 256
257 irq->source |= (val >> (i * 8)) & 0xff; 257 irq->source |= (val >> (i * 8)) & 0xff;
258 258
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
260 irq->pending_latch = true; 260 irq->pending_latch = true;
261 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 261 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
262 } else { 262 } else {
263 spin_unlock_irqrestore(&irq->irq_lock, flags); 263 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
264 } 264 }
265 vgic_put_irq(vcpu->kvm, irq); 265 vgic_put_irq(vcpu->kvm, irq);
266 } 266 }
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index b3d1f0985117..4a12322bf7df 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
169 if (!irq) 169 if (!irq)
170 return; 170 return;
171 171
172 spin_lock_irqsave(&irq->irq_lock, flags); 172 raw_spin_lock_irqsave(&irq->irq_lock, flags);
173 173
174 /* We only care about and preserve Aff0, Aff1 and Aff2. */ 174 /* We only care about and preserve Aff0, Aff1 and Aff2. */
175 irq->mpidr = val & GENMASK(23, 0); 175 irq->mpidr = val & GENMASK(23, 0);
176 irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); 176 irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
177 177
178 spin_unlock_irqrestore(&irq->irq_lock, flags); 178 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
179 vgic_put_irq(vcpu->kvm, irq); 179 vgic_put_irq(vcpu->kvm, irq);
180} 180}
181 181
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
281 for (i = 0; i < len * 8; i++) { 281 for (i = 0; i < len * 8; i++) {
282 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 282 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
283 283
284 spin_lock_irqsave(&irq->irq_lock, flags); 284 raw_spin_lock_irqsave(&irq->irq_lock, flags);
285 if (test_bit(i, &val)) { 285 if (test_bit(i, &val)) {
286 /* 286 /*
287 * pending_latch is set irrespective of irq type 287 * pending_latch is set irrespective of irq type
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
292 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 292 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
293 } else { 293 } else {
294 irq->pending_latch = false; 294 irq->pending_latch = false;
295 spin_unlock_irqrestore(&irq->irq_lock, flags); 295 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
296 } 296 }
297 297
298 vgic_put_irq(vcpu->kvm, irq); 298 vgic_put_irq(vcpu->kvm, irq);
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
957 957
958 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); 958 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
959 959
960 spin_lock_irqsave(&irq->irq_lock, flags); 960 raw_spin_lock_irqsave(&irq->irq_lock, flags);
961 961
962 /* 962 /*
963 * An access targetting Group0 SGIs can only generate 963 * An access targetting Group0 SGIs can only generate
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
968 irq->pending_latch = true; 968 irq->pending_latch = true;
969 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 969 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
970 } else { 970 } else {
971 spin_unlock_irqrestore(&irq->irq_lock, flags); 971 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
972 } 972 }
973 973
974 vgic_put_irq(vcpu->kvm, irq); 974 vgic_put_irq(vcpu->kvm, irq);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ceeda7e04a4d..7de42fba05b5 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
77 for (i = 0; i < len * 8; i++) { 77 for (i = 0; i < len * 8; i++) {
78 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 78 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
79 79
80 spin_lock_irqsave(&irq->irq_lock, flags); 80 raw_spin_lock_irqsave(&irq->irq_lock, flags);
81 irq->group = !!(val & BIT(i)); 81 irq->group = !!(val & BIT(i));
82 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 82 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
83 83
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
120 for_each_set_bit(i, &val, len * 8) { 120 for_each_set_bit(i, &val, len * 8) {
121 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 121 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
122 122
123 spin_lock_irqsave(&irq->irq_lock, flags); 123 raw_spin_lock_irqsave(&irq->irq_lock, flags);
124 irq->enabled = true; 124 irq->enabled = true;
125 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 125 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
126 126
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
139 for_each_set_bit(i, &val, len * 8) { 139 for_each_set_bit(i, &val, len * 8) {
140 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 140 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
141 141
142 spin_lock_irqsave(&irq->irq_lock, flags); 142 raw_spin_lock_irqsave(&irq->irq_lock, flags);
143 143
144 irq->enabled = false; 144 irq->enabled = false;
145 145
146 spin_unlock_irqrestore(&irq->irq_lock, flags); 146 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
147 vgic_put_irq(vcpu->kvm, irq); 147 vgic_put_irq(vcpu->kvm, irq);
148 } 148 }
149} 149}
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
160 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 160 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
161 unsigned long flags; 161 unsigned long flags;
162 162
163 spin_lock_irqsave(&irq->irq_lock, flags); 163 raw_spin_lock_irqsave(&irq->irq_lock, flags);
164 if (irq_is_pending(irq)) 164 if (irq_is_pending(irq))
165 value |= (1U << i); 165 value |= (1U << i);
166 spin_unlock_irqrestore(&irq->irq_lock, flags); 166 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
167 167
168 vgic_put_irq(vcpu->kvm, irq); 168 vgic_put_irq(vcpu->kvm, irq);
169 } 169 }
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
215 for_each_set_bit(i, &val, len * 8) { 215 for_each_set_bit(i, &val, len * 8) {
216 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 216 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
217 217
218 spin_lock_irqsave(&irq->irq_lock, flags); 218 raw_spin_lock_irqsave(&irq->irq_lock, flags);
219 if (irq->hw) 219 if (irq->hw)
220 vgic_hw_irq_spending(vcpu, irq, is_uaccess); 220 vgic_hw_irq_spending(vcpu, irq, is_uaccess);
221 else 221 else
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
262 for_each_set_bit(i, &val, len * 8) { 262 for_each_set_bit(i, &val, len * 8) {
263 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 263 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
264 264
265 spin_lock_irqsave(&irq->irq_lock, flags); 265 raw_spin_lock_irqsave(&irq->irq_lock, flags);
266 266
267 if (irq->hw) 267 if (irq->hw)
268 vgic_hw_irq_cpending(vcpu, irq, is_uaccess); 268 vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
269 else 269 else
270 irq->pending_latch = false; 270 irq->pending_latch = false;
271 271
272 spin_unlock_irqrestore(&irq->irq_lock, flags); 272 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
273 vgic_put_irq(vcpu->kvm, irq); 273 vgic_put_irq(vcpu->kvm, irq);
274 } 274 }
275} 275}
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
311 unsigned long flags; 311 unsigned long flags;
312 struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); 312 struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
313 313
314 spin_lock_irqsave(&irq->irq_lock, flags); 314 raw_spin_lock_irqsave(&irq->irq_lock, flags);
315 315
316 if (irq->hw) { 316 if (irq->hw) {
317 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); 317 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
342 if (irq->active) 342 if (irq->active)
343 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 343 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
344 else 344 else
345 spin_unlock_irqrestore(&irq->irq_lock, flags); 345 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
346} 346}
347 347
348/* 348/*
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
485 for (i = 0; i < len; i++) { 485 for (i = 0; i < len; i++) {
486 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 486 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
487 487
488 spin_lock_irqsave(&irq->irq_lock, flags); 488 raw_spin_lock_irqsave(&irq->irq_lock, flags);
489 /* Narrow the priority range to what we actually support */ 489 /* Narrow the priority range to what we actually support */
490 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); 490 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
491 spin_unlock_irqrestore(&irq->irq_lock, flags); 491 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
492 492
493 vgic_put_irq(vcpu->kvm, irq); 493 vgic_put_irq(vcpu->kvm, irq);
494 } 494 }
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
534 continue; 534 continue;
535 535
536 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 536 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
537 spin_lock_irqsave(&irq->irq_lock, flags); 537 raw_spin_lock_irqsave(&irq->irq_lock, flags);
538 538
539 if (test_bit(i * 2 + 1, &val)) 539 if (test_bit(i * 2 + 1, &val))
540 irq->config = VGIC_CONFIG_EDGE; 540 irq->config = VGIC_CONFIG_EDGE;
541 else 541 else
542 irq->config = VGIC_CONFIG_LEVEL; 542 irq->config = VGIC_CONFIG_LEVEL;
543 543
544 spin_unlock_irqrestore(&irq->irq_lock, flags); 544 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
545 vgic_put_irq(vcpu->kvm, irq); 545 vgic_put_irq(vcpu->kvm, irq);
546 } 546 }
547} 547}
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
590 * restore irq config before line level. 590 * restore irq config before line level.
591 */ 591 */
592 new_level = !!(val & (1U << i)); 592 new_level = !!(val & (1U << i));
593 spin_lock_irqsave(&irq->irq_lock, flags); 593 raw_spin_lock_irqsave(&irq->irq_lock, flags);
594 irq->line_level = new_level; 594 irq->line_level = new_level;
595 if (new_level) 595 if (new_level)
596 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 596 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
597 else 597 else
598 spin_unlock_irqrestore(&irq->irq_lock, flags); 598 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
599 599
600 vgic_put_irq(vcpu->kvm, irq); 600 vgic_put_irq(vcpu->kvm, irq);
601 } 601 }
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892abd7dc..d91a8938aa7c 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
84 84
85 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 85 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
86 86
87 spin_lock(&irq->irq_lock); 87 raw_spin_lock(&irq->irq_lock);
88 88
89 /* Always preserve the active bit */ 89 /* Always preserve the active bit */
90 irq->active = !!(val & GICH_LR_ACTIVE_BIT); 90 irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
127 vgic_irq_set_phys_active(irq, false); 127 vgic_irq_set_phys_active(irq, false);
128 } 128 }
129 129
130 spin_unlock(&irq->irq_lock); 130 raw_spin_unlock(&irq->irq_lock);
131 vgic_put_irq(vcpu->kvm, irq); 131 vgic_put_irq(vcpu->kvm, irq);
132 } 132 }
133 133
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9c0dd234ebe8..4ee0aeb9a905 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
76 if (!irq) /* An LPI could have been unmapped. */ 76 if (!irq) /* An LPI could have been unmapped. */
77 continue; 77 continue;
78 78
79 spin_lock(&irq->irq_lock); 79 raw_spin_lock(&irq->irq_lock);
80 80
81 /* Always preserve the active bit */ 81 /* Always preserve the active bit */
82 irq->active = !!(val & ICH_LR_ACTIVE_BIT); 82 irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
119 vgic_irq_set_phys_active(irq, false); 119 vgic_irq_set_phys_active(irq, false);
120 } 120 }
121 121
122 spin_unlock(&irq->irq_lock); 122 raw_spin_unlock(&irq->irq_lock);
123 vgic_put_irq(vcpu->kvm, irq); 123 vgic_put_irq(vcpu->kvm, irq);
124 } 124 }
125 125
@@ -347,9 +347,9 @@ retry:
347 347
348 status = val & (1 << bit_nr); 348 status = val & (1 << bit_nr);
349 349
350 spin_lock_irqsave(&irq->irq_lock, flags); 350 raw_spin_lock_irqsave(&irq->irq_lock, flags);
351 if (irq->target_vcpu != vcpu) { 351 if (irq->target_vcpu != vcpu) {
352 spin_unlock_irqrestore(&irq->irq_lock, flags); 352 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
353 goto retry; 353 goto retry;
354 } 354 }
355 irq->pending_latch = status; 355 irq->pending_latch = status;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 870b1185173b..abd9c7352677 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
54 * When taking more than one ap_list_lock at the same time, always take the 54 * When taking more than one ap_list_lock at the same time, always take the
55 * lowest numbered VCPU's ap_list_lock first, so: 55 * lowest numbered VCPU's ap_list_lock first, so:
56 * vcpuX->vcpu_id < vcpuY->vcpu_id: 56 * vcpuX->vcpu_id < vcpuY->vcpu_id:
57 * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); 57 * raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
58 * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); 58 * raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
59 * 59 *
60 * Since the VGIC must support injecting virtual interrupts from ISRs, we have 60 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
61 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer 61 * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
62 * spinlocks for any lock that may be taken while injecting an interrupt. 62 * spinlocks for any lock that may be taken while injecting an interrupt.
63 */ 63 */
64 64
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
72 struct vgic_irq *irq = NULL; 72 struct vgic_irq *irq = NULL;
73 unsigned long flags; 73 unsigned long flags;
74 74
75 spin_lock_irqsave(&dist->lpi_list_lock, flags); 75 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
76 76
77 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 77 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
78 if (irq->intid != intid) 78 if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
88 irq = NULL; 88 irq = NULL;
89 89
90out_unlock: 90out_unlock:
91 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 91 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
92 92
93 return irq; 93 return irq;
94} 94}
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
138 if (irq->intid < VGIC_MIN_LPI) 138 if (irq->intid < VGIC_MIN_LPI)
139 return; 139 return;
140 140
141 spin_lock_irqsave(&dist->lpi_list_lock, flags); 141 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
142 if (!kref_put(&irq->refcount, vgic_irq_release)) { 142 if (!kref_put(&irq->refcount, vgic_irq_release)) {
143 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 143 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
144 return; 144 return;
145 }; 145 };
146 146
147 list_del(&irq->lpi_list); 147 list_del(&irq->lpi_list);
148 dist->lpi_list_count--; 148 dist->lpi_list_count--;
149 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 149 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
150 150
151 kfree(irq); 151 kfree(irq);
152} 152}
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
244 bool penda, pendb; 244 bool penda, pendb;
245 int ret; 245 int ret;
246 246
247 spin_lock(&irqa->irq_lock); 247 raw_spin_lock(&irqa->irq_lock);
248 spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); 248 raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
249 249
250 if (irqa->active || irqb->active) { 250 if (irqa->active || irqb->active) {
251 ret = (int)irqb->active - (int)irqa->active; 251 ret = (int)irqb->active - (int)irqa->active;
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
263 /* Both pending and enabled, sort by priority */ 263 /* Both pending and enabled, sort by priority */
264 ret = irqa->priority - irqb->priority; 264 ret = irqa->priority - irqb->priority;
265out: 265out:
266 spin_unlock(&irqb->irq_lock); 266 raw_spin_unlock(&irqb->irq_lock);
267 spin_unlock(&irqa->irq_lock); 267 raw_spin_unlock(&irqa->irq_lock);
268 return ret; 268 return ret;
269} 269}
270 270
@@ -325,7 +325,7 @@ retry:
325 * not need to be inserted into an ap_list and there is also 325 * not need to be inserted into an ap_list and there is also
326 * no more work for us to do. 326 * no more work for us to do.
327 */ 327 */
328 spin_unlock_irqrestore(&irq->irq_lock, flags); 328 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
329 329
330 /* 330 /*
331 * We have to kick the VCPU here, because we could be 331 * We have to kick the VCPU here, because we could be
@@ -347,12 +347,12 @@ retry:
347 * We must unlock the irq lock to take the ap_list_lock where 347 * We must unlock the irq lock to take the ap_list_lock where
348 * we are going to insert this new pending interrupt. 348 * we are going to insert this new pending interrupt.
349 */ 349 */
350 spin_unlock_irqrestore(&irq->irq_lock, flags); 350 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
351 351
352 /* someone can do stuff here, which we re-check below */ 352 /* someone can do stuff here, which we re-check below */
353 353
354 spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 354 raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
355 spin_lock(&irq->irq_lock); 355 raw_spin_lock(&irq->irq_lock);
356 356
357 /* 357 /*
358 * Did something change behind our backs? 358 * Did something change behind our backs?
@@ -367,10 +367,11 @@ retry:
367 */ 367 */
368 368
369 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { 369 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
370 spin_unlock(&irq->irq_lock); 370 raw_spin_unlock(&irq->irq_lock);
371 spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 371 raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
372 flags);
372 373
373 spin_lock_irqsave(&irq->irq_lock, flags); 374 raw_spin_lock_irqsave(&irq->irq_lock, flags);
374 goto retry; 375 goto retry;
375 } 376 }
376 377
@@ -382,8 +383,8 @@ retry:
382 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); 383 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
383 irq->vcpu = vcpu; 384 irq->vcpu = vcpu;
384 385
385 spin_unlock(&irq->irq_lock); 386 raw_spin_unlock(&irq->irq_lock);
386 spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 387 raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
387 388
388 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); 389 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
389 kvm_vcpu_kick(vcpu); 390 kvm_vcpu_kick(vcpu);
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
430 if (!irq) 431 if (!irq)
431 return -EINVAL; 432 return -EINVAL;
432 433
433 spin_lock_irqsave(&irq->irq_lock, flags); 434 raw_spin_lock_irqsave(&irq->irq_lock, flags);
434 435
435 if (!vgic_validate_injection(irq, level, owner)) { 436 if (!vgic_validate_injection(irq, level, owner)) {
436 /* Nothing to see here, move along... */ 437 /* Nothing to see here, move along... */
437 spin_unlock_irqrestore(&irq->irq_lock, flags); 438 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
438 vgic_put_irq(kvm, irq); 439 vgic_put_irq(kvm, irq);
439 return 0; 440 return 0;
440 } 441 }
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
494 495
495 BUG_ON(!irq); 496 BUG_ON(!irq);
496 497
497 spin_lock_irqsave(&irq->irq_lock, flags); 498 raw_spin_lock_irqsave(&irq->irq_lock, flags);
498 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); 499 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
499 spin_unlock_irqrestore(&irq->irq_lock, flags); 500 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
500 vgic_put_irq(vcpu->kvm, irq); 501 vgic_put_irq(vcpu->kvm, irq);
501 502
502 return ret; 503 return ret;
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
519 if (!irq->hw) 520 if (!irq->hw)
520 goto out; 521 goto out;
521 522
522 spin_lock_irqsave(&irq->irq_lock, flags); 523 raw_spin_lock_irqsave(&irq->irq_lock, flags);
523 irq->active = false; 524 irq->active = false;
524 irq->pending_latch = false; 525 irq->pending_latch = false;
525 irq->line_level = false; 526 irq->line_level = false;
526 spin_unlock_irqrestore(&irq->irq_lock, flags); 527 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
527out: 528out:
528 vgic_put_irq(vcpu->kvm, irq); 529 vgic_put_irq(vcpu->kvm, irq);
529} 530}
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
539 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 540 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
540 BUG_ON(!irq); 541 BUG_ON(!irq);
541 542
542 spin_lock_irqsave(&irq->irq_lock, flags); 543 raw_spin_lock_irqsave(&irq->irq_lock, flags);
543 kvm_vgic_unmap_irq(irq); 544 kvm_vgic_unmap_irq(irq);
544 spin_unlock_irqrestore(&irq->irq_lock, flags); 545 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
545 vgic_put_irq(vcpu->kvm, irq); 546 vgic_put_irq(vcpu->kvm, irq);
546 547
547 return 0; 548 return 0;
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
571 return -EINVAL; 572 return -EINVAL;
572 573
573 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 574 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
574 spin_lock_irqsave(&irq->irq_lock, flags); 575 raw_spin_lock_irqsave(&irq->irq_lock, flags);
575 if (irq->owner && irq->owner != owner) 576 if (irq->owner && irq->owner != owner)
576 ret = -EEXIST; 577 ret = -EEXIST;
577 else 578 else
578 irq->owner = owner; 579 irq->owner = owner;
579 spin_unlock_irqrestore(&irq->irq_lock, flags); 580 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
580 581
581 return ret; 582 return ret;
582} 583}
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
597 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 598 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
598 599
599retry: 600retry:
600 spin_lock(&vgic_cpu->ap_list_lock); 601 raw_spin_lock(&vgic_cpu->ap_list_lock);
601 602
602 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { 603 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
603 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; 604 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
604 bool target_vcpu_needs_kick = false; 605 bool target_vcpu_needs_kick = false;
605 606
606 spin_lock(&irq->irq_lock); 607 raw_spin_lock(&irq->irq_lock);
607 608
608 BUG_ON(vcpu != irq->vcpu); 609 BUG_ON(vcpu != irq->vcpu);
609 610
@@ -616,7 +617,7 @@ retry:
616 */ 617 */
617 list_del(&irq->ap_list); 618 list_del(&irq->ap_list);
618 irq->vcpu = NULL; 619 irq->vcpu = NULL;
619 spin_unlock(&irq->irq_lock); 620 raw_spin_unlock(&irq->irq_lock);
620 621
621 /* 622 /*
622 * This vgic_put_irq call matches the 623 * This vgic_put_irq call matches the
@@ -631,14 +632,14 @@ retry:
631 632
632 if (target_vcpu == vcpu) { 633 if (target_vcpu == vcpu) {
633 /* We're on the right CPU */ 634 /* We're on the right CPU */
634 spin_unlock(&irq->irq_lock); 635 raw_spin_unlock(&irq->irq_lock);
635 continue; 636 continue;
636 } 637 }
637 638
638 /* This interrupt looks like it has to be migrated. */ 639 /* This interrupt looks like it has to be migrated. */
639 640
640 spin_unlock(&irq->irq_lock); 641 raw_spin_unlock(&irq->irq_lock);
641 spin_unlock(&vgic_cpu->ap_list_lock); 642 raw_spin_unlock(&vgic_cpu->ap_list_lock);
642 643
643 /* 644 /*
644 * Ensure locking order by always locking the smallest 645 * Ensure locking order by always locking the smallest
@@ -652,10 +653,10 @@ retry:
652 vcpuB = vcpu; 653 vcpuB = vcpu;
653 } 654 }
654 655
655 spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); 656 raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
656 spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, 657 raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
657 SINGLE_DEPTH_NESTING); 658 SINGLE_DEPTH_NESTING);
658 spin_lock(&irq->irq_lock); 659 raw_spin_lock(&irq->irq_lock);
659 660
660 /* 661 /*
661 * If the affinity has been preserved, move the 662 * If the affinity has been preserved, move the
@@ -675,9 +676,9 @@ retry:
675 target_vcpu_needs_kick = true; 676 target_vcpu_needs_kick = true;
676 } 677 }
677 678
678 spin_unlock(&irq->irq_lock); 679 raw_spin_unlock(&irq->irq_lock);
679 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); 680 raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
680 spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); 681 raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
681 682
682 if (target_vcpu_needs_kick) { 683 if (target_vcpu_needs_kick) {
683 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); 684 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
687 goto retry; 688 goto retry;
688 } 689 }
689 690
690 spin_unlock(&vgic_cpu->ap_list_lock); 691 raw_spin_unlock(&vgic_cpu->ap_list_lock);
691} 692}
692 693
693static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) 694static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
741 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 742 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
742 int w; 743 int w;
743 744
744 spin_lock(&irq->irq_lock); 745 raw_spin_lock(&irq->irq_lock);
745 /* GICv2 SGIs can count for more than one... */ 746 /* GICv2 SGIs can count for more than one... */
746 w = vgic_irq_get_lr_count(irq); 747 w = vgic_irq_get_lr_count(irq);
747 spin_unlock(&irq->irq_lock); 748 raw_spin_unlock(&irq->irq_lock);
748 749
749 count += w; 750 count += w;
750 *multi_sgi |= (w > 1); 751 *multi_sgi |= (w > 1);
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
770 count = 0; 771 count = 0;
771 772
772 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 773 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
773 spin_lock(&irq->irq_lock); 774 raw_spin_lock(&irq->irq_lock);
774 775
775 /* 776 /*
776 * If we have multi-SGIs in the pipeline, we need to 777 * If we have multi-SGIs in the pipeline, we need to
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
780 * the AP list has been sorted already. 781 * the AP list has been sorted already.
781 */ 782 */
782 if (multi_sgi && irq->priority > prio) { 783 if (multi_sgi && irq->priority > prio) {
783 		spin_unlock(&irq->irq_lock); 784 			raw_spin_unlock(&irq->irq_lock);
784 break; 785 break;
785 } 786 }
786 787
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
791 prio = irq->priority; 792 prio = irq->priority;
792 } 793 }
793 794
794 spin_unlock(&irq->irq_lock); 795 raw_spin_unlock(&irq->irq_lock);
795 796
796 if (count == kvm_vgic_global_state.nr_lr) { 797 if (count == kvm_vgic_global_state.nr_lr) {
797 if (!list_is_last(&irq->ap_list, 798 if (!list_is_last(&irq->ap_list,
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
872 873
873 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 874 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
874 875
875 spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); 876 raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
876 vgic_flush_lr_state(vcpu); 877 vgic_flush_lr_state(vcpu);
877 spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); 878 raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
878 879
879 if (can_access_vgic_from_kernel()) 880 if (can_access_vgic_from_kernel())
880 vgic_restore_state(vcpu); 881 vgic_restore_state(vcpu);
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
918 919
919 vgic_get_vmcr(vcpu, &vmcr); 920 vgic_get_vmcr(vcpu, &vmcr);
920 921
921 spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); 922 raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
922 923
923 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 924 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
924 spin_lock(&irq->irq_lock); 925 raw_spin_lock(&irq->irq_lock);
925 pending = irq_is_pending(irq) && irq->enabled && 926 pending = irq_is_pending(irq) && irq->enabled &&
926 !irq->active && 927 !irq->active &&
927 irq->priority < vmcr.pmr; 928 irq->priority < vmcr.pmr;
928 spin_unlock(&irq->irq_lock); 929 raw_spin_unlock(&irq->irq_lock);
929 930
930 if (pending) 931 if (pending)
931 break; 932 break;
932 } 933 }
933 934
934 spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); 935 raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
935 936
936 return pending; 937 return pending;
937} 938}
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
963 return false; 964 return false;
964 965
965 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 966 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
966 spin_lock_irqsave(&irq->irq_lock, flags); 967 raw_spin_lock_irqsave(&irq->irq_lock, flags);
967 map_is_active = irq->hw && irq->active; 968 map_is_active = irq->hw && irq->active;
968 spin_unlock_irqrestore(&irq->irq_lock, flags); 969 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
969 vgic_put_irq(vcpu->kvm, irq); 970 vgic_put_irq(vcpu->kvm, irq);
970 971
971 return map_is_active; 972 return map_is_active;
972} 973}
973
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1f888a103f78..585845203db8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1227,9 +1227,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
1227{ 1227{
1228 struct kvm_memslots *slots; 1228 struct kvm_memslots *slots;
1229 struct kvm_memory_slot *memslot; 1229 struct kvm_memory_slot *memslot;
1230 int as_id, id, n; 1230 int as_id, id;
1231 gfn_t offset; 1231 gfn_t offset;
1232 unsigned long i; 1232 unsigned long i, n;
1233 unsigned long *dirty_bitmap; 1233 unsigned long *dirty_bitmap;
1234 unsigned long *dirty_bitmap_buffer; 1234 unsigned long *dirty_bitmap_buffer;
1235 1235
@@ -1249,6 +1249,11 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
1249 return -ENOENT; 1249 return -ENOENT;
1250 1250
1251 n = kvm_dirty_bitmap_bytes(memslot); 1251 n = kvm_dirty_bitmap_bytes(memslot);
1252
1253 if (log->first_page > memslot->npages ||
1254 log->num_pages > memslot->npages - log->first_page)
1255 return -EINVAL;
1256
1252 *flush = false; 1257 *flush = false;
1253 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 1258 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1254 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 1259 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
@@ -2995,8 +3000,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
2995 if (ops->init) 3000 if (ops->init)
2996 ops->init(dev); 3001 ops->init(dev);
2997 3002
3003 kvm_get_kvm(kvm);
2998 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 3004 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
2999 if (ret < 0) { 3005 if (ret < 0) {
3006 kvm_put_kvm(kvm);
3000 mutex_lock(&kvm->lock); 3007 mutex_lock(&kvm->lock);
3001 list_del(&dev->vm_node); 3008 list_del(&dev->vm_node);
3002 mutex_unlock(&kvm->lock); 3009 mutex_unlock(&kvm->lock);
@@ -3004,7 +3011,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
3004 return ret; 3011 return ret;
3005 } 3012 }
3006 3013
3007 kvm_get_kvm(kvm);
3008 cd->fd = ret; 3014 cd->fd = ret;
3009 return 0; 3015 return 0;
3010} 3016}