-rw-r--r--CREDITS20
-rw-r--r--Documentation/admin-guide/README.rst32
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5433-clock.txt23
-rw-r--r--Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt24
-rw-r--r--Documentation/driver-model/devres.txt2
-rw-r--r--Documentation/networking/dsa/dsa.txt10
-rw-r--r--Documentation/networking/msg_zerocopy.rst2
-rw-r--r--Documentation/networking/operstates.txt14
-rw-r--r--Documentation/networking/switchdev.txt10
-rw-r--r--Documentation/process/applying-patches.rst117
-rw-r--r--Documentation/sysctl/fs.txt4
-rw-r--r--Documentation/translations/it_IT/admin-guide/README.rst2
-rw-r--r--MAINTAINERS26
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/asm/irq.h6
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arc/Kconfig20
-rw-r--r--arch/arc/configs/nps_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig2
-rw-r--r--arch/arc/include/asm/arcregs.h8
-rw-r--r--arch/arc/include/asm/cache.h11
-rw-r--r--arch/arc/include/asm/entry-arcv2.h54
-rw-r--r--arch/arc/include/asm/uaccess.h8
-rw-r--r--arch/arc/kernel/entry-arcv2.S4
-rw-r--r--arch/arc/kernel/head.S16
-rw-r--r--arch/arc/kernel/intc-arcv2.c2
-rw-r--r--arch/arc/kernel/setup.c119
-rw-r--r--arch/arc/lib/memcpy-archs.S14
-rw-r--r--arch/arc/plat-hsdk/Kconfig1
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts2
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts4
-rw-r--r--arch/arm/boot/dts/armada-xp-db.dts46
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts13
-rw-r--r--arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts85
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts11
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi9
-rw-r--r--arch/arm/boot/dts/omap5-cm-t54.dts12
-rw-r--r--arch/arm/boot/dts/rk3188.dtsi1
-rw-r--r--arch/arm/boot/dts/tegra124-nyan.dtsi17
-rw-r--r--arch/arm/include/asm/irq.h1
-rw-r--r--arch/arm/include/asm/kvm_host.h10
-rw-r--r--arch/arm/include/asm/stage2_pgtable.h5
-rw-r--r--arch/arm/kernel/irq.c62
-rw-r--r--arch/arm/kernel/smp.c2
-rw-r--r--arch/arm/kvm/coproc.c4
-rw-r--r--arch/arm/kvm/reset.c24
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c16
-rw-r--r--arch/arm/mach-omap2/display.c7
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c36
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c16
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/probes/kprobes/opt-arm.c2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-evk.dts44
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts2
-rw-r--r--arch/arm64/include/asm/kvm_host.h11
-rw-r--r--arch/arm64/include/asm/memory.h11
-rw-r--r--arch/arm64/include/asm/neon-intrinsics.h4
-rw-r--r--arch/arm64/kernel/head.S3
-rw-r--r--arch/arm64/kernel/ptrace.c15
-rw-r--r--arch/arm64/kernel/setup.c4
-rw-r--r--arch/arm64/kvm/hyp/switch.c5
-rw-r--r--arch/arm64/kvm/hyp/sysreg-sr.c5
-rw-r--r--arch/arm64/kvm/reset.c50
-rw-r--r--arch/arm64/kvm/sys_regs.c50
-rw-r--r--arch/arm64/mm/kasan_init.c2
-rw-r--r--arch/csky/include/asm/pgtable.h9
-rw-r--r--arch/csky/include/asm/processor.h4
-rw-r--r--arch/csky/kernel/dumpstack.c4
-rw-r--r--arch/csky/kernel/ptrace.c3
-rw-r--r--arch/csky/kernel/smp.c3
-rw-r--r--arch/csky/mm/ioremap.c14
-rw-r--r--arch/mips/net/ebpf_jit.c24
-rw-r--r--arch/parisc/kernel/ptrace.c29
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h4
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci.c2
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h6
-rw-r--r--arch/riscv/include/asm/pgtable.h8
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S8
-rw-r--r--arch/s390/kernel/swsusp.S4
-rw-r--r--arch/s390/kvm/vsie.c2
-rw-r--r--arch/s390/pci/pci.c4
-rw-r--r--arch/sh/boot/dts/Makefile2
-rw-r--r--arch/x86/events/core.c14
-rw-r--r--arch/x86/events/intel/core.c9
-rw-r--r--arch/x86/events/perf_event.h16
-rw-r--r--arch/x86/ia32/ia32_aout.c6
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/uv/bios.h8
-rw-r--r--arch/x86/kvm/cpuid.c4
-rw-r--r--arch/x86/kvm/mmu.c18
-rw-r--r--arch/x86/kvm/vmx/nested.c12
-rw-r--r--arch/x86/kvm/vmx/vmx.c29
-rw-r--r--arch/x86/kvm/vmx/vmx.h10
-rw-r--r--arch/x86/kvm/x86.c2
-rw-r--r--arch/x86/platform/uv/bios_uv.c23
-rw-r--r--block/blk-mq.c12
-rw-r--r--crypto/af_alg.c4
-rw-r--r--drivers/auxdisplay/ht16k33.c2
-rw-r--r--drivers/base/power/runtime.c2
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/bus/ti-sysc.c6
-rw-r--r--drivers/clk/Kconfig6
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/at91/at91sam9x5.c5
-rw-r--r--drivers/clk/at91/sama5d2.c4
-rw-r--r--drivers/clk/at91/sama5d4.c2
-rw-r--r--drivers/clk/clk-clps711x.c61
-rw-r--r--drivers/clk/clk-devres.c11
-rw-r--r--drivers/clk/clk-fixed-mmio.c101
-rw-r--r--drivers/clk/clk-highbank.c1
-rw-r--r--drivers/clk/clk-max77686.c28
-rw-r--r--drivers/clk/clk-qoriq.c1
-rw-r--r--drivers/clk/clk-twl6040.c53
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/clkdev.c117
-rw-r--r--drivers/clk/imx/clk-imx6q.c1
-rw-r--r--drivers/clk/imx/clk-imx6sx.c1
-rw-r--r--drivers/clk/imx/clk-imx7d.c1
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c16
-rw-r--r--drivers/clk/imx/clk-vf610.c1
-rw-r--r--drivers/clk/meson/Kconfig101
-rw-r--r--drivers/clk/meson/Makefile29
-rw-r--r--drivers/clk/meson/axg-aoclk.c193
-rw-r--r--drivers/clk/meson/axg-aoclk.h13
-rw-r--r--drivers/clk/meson/axg-audio.c5
-rw-r--r--drivers/clk/meson/axg.c69
-rw-r--r--drivers/clk/meson/clk-dualdiv.c138
-rw-r--r--drivers/clk/meson/clk-dualdiv.h33
-rw-r--r--drivers/clk/meson/clk-input.c7
-rw-r--r--drivers/clk/meson/clk-input.h19
-rw-r--r--drivers/clk/meson/clk-mpll.c12
-rw-r--r--drivers/clk/meson/clk-mpll.h30
-rw-r--r--drivers/clk/meson/clk-phase.c75
-rw-r--r--drivers/clk/meson/clk-phase.h26
-rw-r--r--drivers/clk/meson/clk-pll.c216
-rw-r--r--drivers/clk/meson/clk-pll.h49
-rw-r--r--drivers/clk/meson/clk-regmap.c5
-rw-r--r--drivers/clk/meson/clk-regmap.h20
-rw-r--r--drivers/clk/meson/clk-triphase.c68
-rw-r--r--drivers/clk/meson/clkc.h127
-rw-r--r--drivers/clk/meson/g12a-aoclk.c454
-rw-r--r--drivers/clk/meson/g12a-aoclk.h34
-rw-r--r--drivers/clk/meson/g12a.c2359
-rw-r--r--drivers/clk/meson/g12a.h175
-rw-r--r--drivers/clk/meson/gxbb-aoclk-32k.c193
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c268
-rw-r--r--drivers/clk/meson/gxbb-aoclk.h20
-rw-r--r--drivers/clk/meson/gxbb.c296
-rw-r--r--drivers/clk/meson/meson-aoclk.c54
-rw-r--r--drivers/clk/meson/meson-aoclk.h13
-rw-r--r--drivers/clk/meson/meson-eeclk.c63
-rw-r--r--drivers/clk/meson/meson-eeclk.h25
-rw-r--r--drivers/clk/meson/meson8b.c374
-rw-r--r--drivers/clk/meson/meson8b.h11
-rw-r--r--drivers/clk/meson/parm.h46
-rw-r--r--drivers/clk/meson/sclk-div.c10
-rw-r--r--drivers/clk/meson/sclk-div.h (renamed from drivers/clk/meson/clkc-audio.h)16
-rw-r--r--drivers/clk/meson/vid-pll-div.c10
-rw-r--r--drivers/clk/meson/vid-pll-div.h20
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c5
-rw-r--r--drivers/clk/mvebu/armada-370.c4
-rw-r--r--drivers/clk/mvebu/armada-xp.c4
-rw-r--r--drivers/clk/mvebu/dove.c8
-rw-r--r--drivers/clk/mvebu/kirkwood.c2
-rw-r--r--drivers/clk/mvebu/mv98dx3236.c4
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c4
-rw-r--r--drivers/clk/renesas/r8a774c0-cpg-mssr.c15
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c147
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h4
-rw-r--r--drivers/clk/samsung/clk-exynos4.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c13
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c38
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c2
-rw-r--r--drivers/clk/socfpga/clk-gate.c22
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c1
-rw-r--r--drivers/clk/socfpga/clk-pll.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c2
-rw-r--r--drivers/clk/ti/adpll.c2
-rw-r--r--drivers/clk/ti/apll.c4
-rw-r--r--drivers/clk/ti/autoidle.c101
-rw-r--r--drivers/clk/ti/clk.c80
-rw-r--r--drivers/clk/ti/clkctrl.c2
-rw-r--r--drivers/clk/ti/clock.h5
-rw-r--r--drivers/clk/ti/clockdomain.c2
-rw-r--r--drivers/clk/ti/divider.c2
-rw-r--r--drivers/clk/ti/dpll.c11
-rw-r--r--drivers/clk/ti/dpll3xxx.c2
-rw-r--r--drivers/clk/ti/gate.c2
-rw-r--r--drivers/clk/ti/interface.c4
-rw-r--r--drivers/clk/ti/mux.c2
-rw-r--r--drivers/clk/uniphier/clk-uniphier-cpugear.c2
-rw-r--r--drivers/clk/x86/clk-st.c3
-rw-r--r--drivers/clocksource/timer-ti-dm.c5
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c2
-rw-r--r--drivers/crypto/ccree/cc_driver.c7
-rw-r--r--drivers/crypto/ccree/cc_pm.c13
-rw-r--r--drivers/crypto/ccree/cc_pm.h3
-rw-r--r--drivers/firmware/efi/efi.c4
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c3
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c7
-rw-r--r--drivers/gpio/gpio-mt7621.c20
-rw-r--r--drivers/gpio/gpio-pxa.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c4
-rw-r--r--drivers/gpu/drm/drm_lease.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c22
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h18
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c238
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h10
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c45
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c38
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h9
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c25
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c1
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c7
-rw-r--r--drivers/gpu/drm/vkms/vkms_crc.c3
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c7
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h2
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c8
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c6
-rw-r--r--drivers/hwmon/nct6775.c3
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c12
-rw-r--r--drivers/i2c/busses/i2c-cadence.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c15
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c10
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/cap11xx.c35
-rw-r--r--drivers/input/keyboard/matrix_keypad.c2
-rw-r--r--drivers/input/keyboard/qt2160.c69
-rw-r--r--drivers/input/keyboard/st-keyscan.c4
-rw-r--r--drivers/input/misc/apanel.c24
-rw-r--r--drivers/input/misc/bma150.c9
-rw-r--r--drivers/input/misc/pwm-vibra.c19
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/mouse/elantech.c9
-rw-r--r--drivers/input/serio/ps2-gpio.c1
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c4
-rw-r--r--drivers/mailbox/mailbox.c1
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-thin.c55
-rw-r--r--drivers/md/raid1.c28
-rw-r--r--drivers/mmc/core/block.c10
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c3
-rw-r--r--drivers/mmc/host/sunxi-mmc.c26
-rw-r--r--drivers/mtd/devices/powernv_flash.c2
-rw-r--r--drivers/mtd/mtdcore.c1
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/bonding/bond_main.c35
-rw-r--r--drivers/net/dsa/b53/b53_common.c90
-rw-r--r--drivers/net/dsa/b53/b53_priv.h3
-rw-r--r--drivers/net/dsa/bcm_sf2.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c28
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c149
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c128
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c15
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c21
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c22
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/phy/marvell10g.c6
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/phy.c16
-rw-r--r--drivers/net/phy/phylink.c19
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/phy/sfp-bus.c2
-rw-r--r--drivers/net/phy/sfp.c30
-rw-r--r--drivers/net/phy/sfp.h2
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c5
-rw-r--r--drivers/net/team/team.c31
-rw-r--r--drivers/net/usb/qmi_wwan.c4
-rw-r--r--drivers/net/usb/r8152.c5
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/vxlan.c14
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c46
-rw-r--r--drivers/nvme/host/pci.c8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c8
-rw-r--r--drivers/s390/crypto/ap_bus.c3
-rw-r--r--drivers/scsi/libiscsi.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c4
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/sd.c12
-rw-r--r--drivers/scsi/sd_zbc.c8
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/of-thermal.c4
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--fs/binfmt_script.c57
-rw-r--r--fs/ceph/snap.c3
-rw-r--r--fs/ext4/fsync.c13
-rw-r--r--fs/gfs2/glops.c1
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/lops.c190
-rw-r--r--fs/gfs2/lops.h4
-rw-r--r--fs/gfs2/ops_fstype.c1
-rw-r--r--fs/gfs2/recovery.c123
-rw-r--r--fs/gfs2/recovery.h2
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/inode.c7
-rw-r--r--fs/nfs/nfs4idmap.c31
-rw-r--r--fs/nfs/write.c11
-rw-r--r--fs/nfsd/nfsctl.c4
-rw-r--r--fs/proc/base.c4
-rw-r--r--fs/proc/task_mmu.c22
-rw-r--r--include/asm-generic/shmparam.h (renamed from include/uapi/asm-generic/shmparam.h)0
-rw-r--r--include/dt-bindings/clock/axg-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/exynos5433.h8
-rw-r--r--include/dt-bindings/clock/g12a-aoclkc.h34
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h135
-rw-r--r--include/dt-bindings/clock/gxbb-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h1
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h1
-rw-r--r--include/dt-bindings/clock/r8a774a1-cpg-mssr.h1
-rw-r--r--include/dt-bindings/clock/r8a774c0-cpg-mssr.h1
-rw-r--r--include/dt-bindings/reset/g12a-aoclkc.h18
-rw-r--r--include/keys/request_key_auth-type.h36
-rw-r--r--include/keys/user-type.h2
-rw-r--r--include/kvm/arm_vgic.h6
-rw-r--r--include/linux/clk.h36
-rw-r--r--include/linux/clk/ti.h1
-rw-r--r--include/linux/clkdev.h4
-rw-r--r--include/linux/compiler_attributes.h14
-rw-r--r--include/linux/efi.h7
-rw-r--r--include/linux/key-type.h22
-rw-r--r--include/linux/memblock.h3
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/netdev_features.h24
-rw-r--r--include/linux/perf_event.h5
-rw-r--r--include/linux/phy.h23
-rw-r--r--include/linux/skbuff.h8
-rw-r--r--include/linux/virtio_net.h19
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/phonet/pep.h5
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/xfrm.h12
-rw-r--r--include/uapi/linux/inet_diag.h16
-rw-r--r--init/initramfs.c6
-rw-r--r--init/main.c3
-rw-r--r--kernel/bpf/lpm_trie.c1
-rw-r--r--kernel/bpf/stackmap.c8
-rw-r--r--kernel/bpf/verifier.c11
-rw-r--r--kernel/events/core.c16
-rw-r--r--kernel/events/ring_buffer.c2
-rw-r--r--kernel/sched/psi.c2
-rw-r--r--kernel/signal.c7
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_kprobe.c10
-rw-r--r--kernel/trace/trace_probe_tmpl.h6
-rw-r--r--lib/assoc_array.c8
-rw-r--r--lib/crc32.c4
-rw-r--r--mm/debug.c4
-rw-r--r--mm/gup.c3
-rw-r--r--mm/kasan/Makefile2
-rw-r--r--mm/kasan/common.c29
-rw-r--r--mm/kasan/tags.c2
-rw-r--r--mm/kmemleak.c10
-rw-r--r--mm/memblock.c11
-rw-r--r--mm/memory_hotplug.c27
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/page_alloc.c20
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/shmem.c10
-rw-r--r--mm/slab.c15
-rw-r--r--mm/slab.h7
-rw-r--r--mm/slab_common.c3
-rw-r--r--mm/slub.c59
-rw-r--r--mm/swap.c17
-rw-r--r--mm/util.c2
-rw-r--r--mm/vmscan.c10
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/bpf/test_run.c45
-rw-r--r--net/bridge/br_multicast.c9
-rw-r--r--net/ceph/messenger.c15
-rw-r--r--net/compat.c6
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/filter.c12
-rw-r--r--net/core/skbuff.c4
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dsa/port.c7
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/inet_diag.c10
-rw-r--r--net/ipv4/inetpeer.c1
-rw-r--r--net/ipv4/ip_gre.c33
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic_main.c7
-rw-r--r--net/ipv4/route.c7
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_ipv4.c5
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/fou6.c2
-rw-r--r--net/ipv6/ip6_gre.c73
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c1
-rw-r--r--net/ipv6/route.c32
-rw-r--r--net/ipv6/seg6.c4
-rw-r--r--net/ipv6/udp.c12
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/key/af_key.c42
-rw-r--r--net/mac80211/agg-tx.c4
-rw-r--r--net/mac80211/cfg.c6
-rw-r--r--net/mac80211/main.c4
-rw-r--r--net/mac80211/mesh.h6
-rw-r--r--net/mac80211/mesh_pathtbl.c157
-rw-r--r--net/mac80211/rx.c7
-rw-r--r--net/mac80211/util.c6
-rw-r--r--net/netfilter/ipvs/Kconfig1
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c11
-rw-r--r--net/netfilter/nf_tables_api.c3
-rw-r--r--net/netfilter/nft_compat.c3
-rw-r--r--net/netfilter/x_tables.c2
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/phonet/pep.c32
-rw-r--r--net/sched/cls_tcindex.c80
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sctp/diag.c1
-rw-r--r--net/sctp/offload.c1
-rw-r--r--net/sctp/stream.c4
-rw-r--r--net/sctp/transport.c3
-rw-r--r--net/smc/smc.h6
-rw-r--r--net/smc/smc_cdc.c4
-rw-r--r--net/smc/smc_cdc.h19
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c49
-rw-r--r--net/sunrpc/debugfs.c2
-rw-r--r--net/sunrpc/xprtrdma/verbs.c3
-rw-r--r--net/tipc/link.c17
-rw-r--r--net/tipc/msg.h22
-rw-r--r--net/tipc/node.c11
-rw-r--r--net/tipc/socket.c11
-rw-r--r--net/unix/af_unix.c57
-rw-r--r--net/unix/diag.c3
-rw-r--r--net/vmw_vsock/vmci_transport.c4
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--net/wireless/pmsr.c26
-rw-r--r--net/wireless/util.c35
-rw-r--r--net/x25/af_x25.c19
-rw-r--r--net/xdp/xdp_umem.c11
-rw-r--r--net/xdp/xsk.c20
-rw-r--r--net/xfrm/xfrm_interface.c4
-rw-r--r--net/xfrm/xfrm_policy.c4
-rw-r--r--net/xfrm/xfrm_state.c30
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--scripts/kallsyms.c4
-rw-r--r--security/keys/internal.h13
-rw-r--r--security/keys/key.c5
-rw-r--r--security/keys/keyctl.c1
-rw-r--r--security/keys/keyring.c4
-rw-r--r--security/keys/proc.c3
-rw-r--r--security/keys/process_keys.c1
-rw-r--r--security/keys/request_key.c73
-rw-r--r--security/keys/request_key_auth.c18
-rw-r--r--security/lsm_audit.c10
-rw-r--r--sound/core/pcm_lib.c20
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_realtek.c42
-rw-r--r--sound/soc/codecs/hdmi-codec.c4
-rw-r--r--sound/soc/codecs/rt5682.c2
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/samsung/i2s.c18
-rw-r--r--sound/soc/sh/rcar/core.c8
-rw-r--r--sound/soc/sh/rcar/ssi.c2
-rw-r--r--sound/soc/sh/rcar/ssiu.c2
-rw-r--r--sound/soc/soc-core.c9
-rw-r--r--sound/soc/soc-dapm.c24
-rw-r--r--sound/soc/soc-topology.c13
-rw-r--r--sound/usb/pcm.c9
-rw-r--r--tools/include/uapi/asm/bitsperlong.h2
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c10
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh1
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile3
-rw-r--r--virt/kvm/arm/arm.c10
-rw-r--r--virt/kvm/arm/mmu.c9
-rw-r--r--virt/kvm/arm/psci.c36
-rw-r--r--virt/kvm/arm/vgic/vgic-debug.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c30
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c22
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c12
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c34
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c8
-rw-r--r--virt/kvm/arm/vgic/vgic.c118
557 files changed, 9662 insertions, 3436 deletions
diff --git a/CREDITS b/CREDITS
index e818eb6a3e71..0175098d4776 100644
--- a/CREDITS
+++ b/CREDITS
@@ -842,10 +842,9 @@ D: ax25-utils maintainer.
 
 N: Helge Deller
 E: deller@gmx.de
-E: hdeller@redhat.de
-D: PA-RISC Linux hacker, LASI-, ASP-, WAX-, LCD/LED-driver
-S: Schimmelsrain 1
-S: D-69231 Rauenberg
+W: http://www.parisc-linux.org/
+D: PA-RISC Linux architecture maintainer
+D: LASI-, ASP-, WAX-, LCD/LED-driver
 S: Germany
 
 N: Jean Delvare
851N: Jean Delvare 850N: Jean Delvare
@@ -1361,7 +1360,7 @@ S: Stellenbosch, Western Cape
1361S: South Africa 1360S: South Africa
1362 1361
1363N: Grant Grundler 1362N: Grant Grundler
1364E: grundler@parisc-linux.org 1363E: grantgrundler@gmail.com
1365W: http://obmouse.sourceforge.net/ 1364W: http://obmouse.sourceforge.net/
1366W: http://www.parisc-linux.org/ 1365W: http://www.parisc-linux.org/
1367D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver 1366D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver
@@ -2492,7 +2491,7 @@ S: Syracuse, New York 13206
 S: USA
 
 N: Kyle McMartin
-E: kyle@parisc-linux.org
+E: kyle@mcmartin.ca
 D: Linux/PARISC hacker
 D: AD1889 sound driver
 S: Ottawa, Canada
@@ -3780,14 +3779,13 @@ S: 21513 Conradia Ct
 S: Cupertino, CA 95014
 S: USA
 
-N: Thibaut Varene
-E: T-Bone@parisc-linux.org
-W: http://www.parisc-linux.org/~varenet/
-P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063
+N: Thibaut Varène
+E: hacks+kernel@slashdirt.org
+W: http://hacks.slashdirt.org/
 D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits
 D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there
 D: AD1889 sound driver
-S: Paris, France
+S: France
 
 N: Heikki Vatiainen
 E: hessu@cs.tut.fi
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 0797eec76be1..47e577264198 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -1,9 +1,9 @@
 .. _readme:
 
-Linux kernel release 4.x <http://kernel.org/>
+Linux kernel release 5.x <http://kernel.org/>
 =============================================
 
-These are the release notes for Linux version 4. Read them carefully,
+These are the release notes for Linux version 5. Read them carefully,
 as they tell you what this is all about, explain how to install the
 kernel, and what to do if something goes wrong.
 
@@ -63,7 +63,7 @@ Installing the kernel source
    directory where you have permissions (e.g. your home directory) and
    unpack it::
 
-     xz -cd linux-4.X.tar.xz | tar xvf -
+     xz -cd linux-5.x.tar.xz | tar xvf -
 
    Replace "X" with the version number of the latest kernel.
 
@@ -72,26 +72,26 @@ Installing the kernel source
    files. They should match the library, and not get messed up by
    whatever the kernel-du-jour happens to be.
 
- - You can also upgrade between 4.x releases by patching. Patches are
+ - You can also upgrade between 5.x releases by patching. Patches are
    distributed in the xz format. To install by patching, get all the
    newer patch files, enter the top level directory of the kernel source
-   (linux-4.X) and execute::
+   (linux-5.x) and execute::
 
-     xz -cd ../patch-4.x.xz | patch -p1
+     xz -cd ../patch-5.x.xz | patch -p1
 
-   Replace "x" for all versions bigger than the version "X" of your current
+   Replace "x" for all versions bigger than the version "x" of your current
    source tree, **in_order**, and you should be ok. You may want to remove
    the backup files (some-file-name~ or some-file-name.orig), and make sure
    that there are no failed patches (some-file-name# or some-file-name.rej).
    If there are, either you or I have made a mistake.
 
-   Unlike patches for the 4.x kernels, patches for the 4.x.y kernels
+   Unlike patches for the 5.x kernels, patches for the 5.x.y kernels
    (also known as the -stable kernels) are not incremental but instead apply
-   directly to the base 4.x kernel. For example, if your base kernel is 4.0
-   and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1
-   and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and
-   want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is,
-   patch -R) **before** applying the 4.0.3 patch. You can read more on this in
+   directly to the base 5.x kernel. For example, if your base kernel is 5.0
+   and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1
+   and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and
+   want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is,
+   patch -R) **before** applying the 5.0.3 patch. You can read more on this in
    :ref:`Documentation/process/applying-patches.rst <applying_patches>`.
 
    Alternatively, the script patch-kernel can be used to automate this
@@ -114,7 +114,7 @@ Installing the kernel source
 Software requirements
 ---------------------
 
-   Compiling and running the 4.x kernels requires up-to-date
+   Compiling and running the 5.x kernels requires up-to-date
    versions of various software packages. Consult
    :ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers
    required and how to get updates for these packages. Beware that using
@@ -132,12 +132,12 @@ Build directory for the kernel
    place for the output files (including .config).
    Example::
 
-     kernel source code: /usr/src/linux-4.X
+     kernel source code: /usr/src/linux-5.x
      build directory:    /home/name/build/kernel
 
    To configure and build the kernel, use::
 
-     cd /usr/src/linux-4.X
+     cd /usr/src/linux-5.x
      make O=/home/name/build/kernel menuconfig
      make O=/home/name/build/kernel
      sudo make O=/home/name/build/kernel modules_install install
diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
index 79511d7bb321..c41f0be5d438 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
@@ -10,6 +10,7 @@ Required Properties:
    - GXL (S905X, S905D) : "amlogic,meson-gxl-aoclkc"
    - GXM (S912) : "amlogic,meson-gxm-aoclkc"
    - AXG (A113D, A113X) : "amlogic,meson-axg-aoclkc"
+   - G12A (S905X2, S905D2, S905Y2) : "amlogic,meson-g12a-aoclkc"
    followed by the common "amlogic,meson-gx-aoclkc"
 - clocks: list of clock phandle, one for each entry clock-names.
 - clock-names: should contain the following:
diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
index a6871953bf04..5c8b105be4d6 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
@@ -9,6 +9,7 @@ Required Properties:
9 "amlogic,gxbb-clkc" for GXBB SoC, 9 "amlogic,gxbb-clkc" for GXBB SoC,
10 "amlogic,gxl-clkc" for GXL and GXM SoC, 10 "amlogic,gxl-clkc" for GXL and GXM SoC,
11 "amlogic,axg-clkc" for AXG SoC. 11 "amlogic,axg-clkc" for AXG SoC.
12 "amlogic,g12a-clkc" for G12A SoC.
12- clocks : list of clock phandle, one for each entry clock-names. 13- clocks : list of clock phandle, one for each entry clock-names.
13- clock-names : should contain the following: 14- clock-names : should contain the following:
14 * "xtal": the platform xtal 15 * "xtal": the platform xtal
diff --git a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
index 50d5897c9849..183c327a7d6b 100644
--- a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
@@ -50,6 +50,8 @@ Required Properties:
    IPs.
  - "samsung,exynos5433-cmu-cam1" - clock controller compatible for CMU_CAM1
    which generates clocks for Cortex-A5/MIPI_CSIS2/FIMC-LITE_C/FIMC-FD IPs.
+ - "samsung,exynos5433-cmu-imem" - clock controller compatible for CMU_IMEM
+   which generates clocks for SSS (Security SubSystem) and SlimSSS IPs.
 
 - reg: physical base address of the controller and length of memory mapped
   region.
@@ -168,6 +170,12 @@ Required Properties:
         - aclk_cam1_400
         - aclk_cam1_552
 
+    Input clocks for imem clock controller:
+        - oscclk
+        - aclk_imem_sssx_266
+        - aclk_imem_266
+        - aclk_imem_200
+
 Optional properties:
  - power-domains: a phandle to respective power domain node as described by
    generic PM domain bindings (see power/power_domain.txt for more
@@ -469,6 +477,21 @@ Example 2: Examples of clock controller nodes are listed below.
         power-domains = <&pd_cam1>;
     };
 
+    cmu_imem: clock-controller@11060000 {
+        compatible = "samsung,exynos5433-cmu-imem";
+        reg = <0x11060000 0x1000>;
+        #clock-cells = <1>;
+
+        clock-names = "oscclk",
+            "aclk_imem_sssx_266",
+            "aclk_imem_266",
+            "aclk_imem_200";
+        clocks = <&xxti>,
+            <&cmu_top CLK_DIV_ACLK_IMEM_SSSX_266>,
+            <&cmu_top CLK_DIV_ACLK_IMEM_266>,
+            <&cmu_top CLK_DIV_ACLK_IMEM_200>;
+    };
+
 Example 3: UART controller node that consumes the clock generated by the clock
     controller.
 
diff --git a/Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt b/Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt
new file mode 100644
index 000000000000..c359367fd1a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt
@@ -0,0 +1,24 @@
+Binding for simple memory mapped io fixed-rate clock sources.
+The driver reads a clock frequency value from a single 32-bit memory mapped
+I/O register and registers it as a fixed rate clock.
+
+It was designed for test systems, like FPGA, not for complete, finished SoCs.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "fixed-mmio-clock".
+- #clock-cells : from common clock binding; shall be set to 0.
+- reg : Address and length of the clock value register set.
+
+Optional properties:
+- clock-output-names : From common clock binding.
+
+Example:
+sysclock: sysclock@fd020004 {
+    #clock-cells = <0>;
+    compatible = "fixed-mmio-clock";
+    reg = <0xfd020004 0x4>;
+};
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index b277cafce71e..d7d6f01e81ff 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -242,9 +242,11 @@ certainly invest a bit more effort into libata core layer).
 
 CLOCK
   devm_clk_get()
+  devm_clk_get_optional()
   devm_clk_put()
   devm_clk_hw_register()
   devm_of_clk_add_hw_provider()
+  devm_clk_hw_register_clkdev()
 
 DMA
   dmaenginem_async_device_register()
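
The hunk above adds devm_clk_get_optional() to the devres list. As a hedged sketch (hypothetical driver and "bus" clock id, not taken from this patch), a probe might use it like this:

#include <linux/clk.h>
#include <linux/platform_device.h>

/*
 * Sketch only: unlike devm_clk_get(), a clock that is simply absent yields
 * NULL here instead of -ENOENT, and a NULL clk is accepted as a no-op by
 * clk_prepare_enable(); real errors (e.g. -EPROBE_DEFER) still propagate.
 */
static int foo_probe(struct platform_device *pdev)
{
	struct clk *bus_clk;
	int ret;

	bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);

	ret = clk_prepare_enable(bus_clk);
	if (ret)
		return ret;

	/* ... rest of probe; devres releases the clock on driver detach ... */
	return 0;
}
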
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 25170ad7d25b..101f2b2c69ad 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -533,16 +533,12 @@ Bridge VLAN filtering
 function that the driver has to call for each VLAN the given port is a member
 of. A switchdev object is used to carry the VID and bridge flags.
 
-- port_fdb_prepare: bridge layer function invoked when the bridge prepares the
-  installation of a Forwarding Database entry. If the operation is not
-  supported, this function should return -EOPNOTSUPP to inform the bridge code
-  to fallback to a software implementation. No hardware setup must be done in
-  this function. See port_fdb_add for this and details.
-
 - port_fdb_add: bridge layer function invoked when the bridge wants to install a
   Forwarding Database entry, the switch hardware should be programmed with the
   specified address in the specified VLAN Id in the forwarding database
-  associated with this VLAN ID
+  associated with this VLAN ID. If the operation is not supported, this
+  function should return -EOPNOTSUPP to inform the bridge code to fallback to
+  a software implementation.
 
 Note: VLAN ID 0 corresponds to the port private database, which, in the context
 of DSA, would be the its port-based VLAN, used by the associated bridge device.
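
To illustrate the fallback rule spelled out in the rewritten port_fdb_add text, here is a hedged driver-side sketch; foo_priv and the foo_hw_* helpers are invented, and the exact dsa_switch_ops signature should be checked against the tree this patch applies to:

/* Sketch only: offload an FDB entry, or let the bridge fall back to software. */
static int foo_port_fdb_add(struct dsa_switch *ds, int port,
			    const unsigned char *addr, u16 vid)
{
	struct foo_priv *priv = ds->priv;

	/* Hardware without an FDB for this VLAN: decline the offload. */
	if (!foo_hw_has_fdb(priv, vid))
		return -EOPNOTSUPP;

	return foo_hw_fdb_write(priv, port, addr, vid, true);
}
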
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index fe46d4867e2d..18c1415e7bfa 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -7,7 +7,7 @@ Intro
 =====
 
 The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
-The feature is currently implemented for TCP sockets.
+The feature is currently implemented for TCP and UDP sockets.
 
 
 Opportunity and Caveats
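
The one-line change above extends MSG_ZEROCOPY from TCP to UDP sockets. As a reminder of the flow this document describes, a hedged userspace sketch (the fallback defines mirror the ones msg_zerocopy.rst itself uses; completion handling on the error queue is only noted, not shown):

#include <errno.h>
#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

/* Sketch only: opt in once, then pass MSG_ZEROCOPY on each send. */
int send_zerocopy(int fd, const void *buf, size_t len)
{
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
		return -errno;

	if (send(fd, buf, len, MSG_ZEROCOPY) < 0)
		return -errno;

	/*
	 * buf must stay untouched until the SO_EE_ORIGIN_ZEROCOPY
	 * notification for this send is read from the error queue
	 * (recvmsg() with MSG_ERRQUEUE), as described further down
	 * in this document.
	 */
	return 0;
}
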
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt
index 355c6d8ef8ad..b203d1334822 100644
--- a/Documentation/networking/operstates.txt
+++ b/Documentation/networking/operstates.txt
@@ -22,8 +22,9 @@ and changeable from userspace under certain rules.
 2. Querying from userspace
 
 Both admin and operational state can be queried via the netlink
-operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK
-to be notified of updates. This is important for setting from userspace.
+operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK
+to be notified of updates while the interface is admin up. This is
+important for setting from userspace.
 
 These values contain interface state:
 
@@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to
 complete. Corresponding functions are netif_dormant_on() to set the
 flag, netif_dormant_off() to clear it and netif_dormant() to query.
 
-On device allocation, networking core sets the flags equivalent to
-netif_carrier_ok() and !netif_dormant().
+On device allocation, both flags __LINK_STATE_NOCARRIER and
+__LINK_STATE_DORMANT are cleared, so the effective state is equivalent
+to netif_carrier_ok() and !netif_dormant().
 
 
 Whenever the driver CHANGES one of these flags, a workqueue event is
@@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the
 driver. Afterwards, the userspace application can set IFLA_OPERSTATE
 to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set
 netif_carrier_off() or netif_dormant_on(). Changes made by userspace
-are multicasted on the netlink group RTMGRP_LINK.
+are multicasted on the netlink group RTNLGRP_LINK.
 
 So basically a 802.1X supplicant interacts with the kernel like this:
 
--subscribe to RTMGRP_LINK
+-subscribe to RTNLGRP_LINK
 -set IFLA_LINKMODE to 1 via RTM_SETLINK
 -query RTM_GETLINK once to get initial state
 -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until
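
The hunks above correct the group name to RTNLGRP_LINK in the supplicant recipe. A hedged sketch of just the subscription step (the RTM_SETLINK/RTM_GETLINK parts of the recipe are omitted; the SOL_NETLINK fallback define is only for older libcs):

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/* Sketch only: open a rtnetlink socket and join the link notification group. */
int open_link_monitor(void)
{
	struct sockaddr_nl addr;
	int group = RTNLGRP_LINK;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &group, sizeof(group)) < 0) {
		close(fd);
		return -1;
	}

	return fd;	/* RTM_NEWLINK messages carrying IFLA_OPERSTATE arrive here */
}
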
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 82236a17b5e6..97b7ca8b9b86 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -92,11 +92,11 @@ device.
 Switch ID
 ^^^^^^^^^
 
-The switchdev driver must implement the switchdev op switchdev_port_attr_get
-for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same
-physical ID for each port of a switch. The ID must be unique between switches
-on the same system. The ID does not need to be unique between switches on
-different systems.
+The switchdev driver must implement the net_device operation
+ndo_get_port_parent_id for each port netdev, returning the same physical ID for
+each port of a switch. The ID must be unique between switches on the same
+system. The ID does not need to be unique between switches on different
+systems.
 
 The switch ID is used to locate ports on a switch and to know if aggregated
 ports belong to the same switch.
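
Since the rewritten paragraph now points drivers at ndo_get_port_parent_id, a hedged driver-side sketch follows; the foo_port structure and its switch_id field are invented, while struct netdev_phys_item_id comes from include/linux/netdevice.h:

#include <linux/netdevice.h>

/* Sketch only: report the same switch-wide ID from every port netdev. */
static int foo_get_port_parent_id(struct net_device *dev,
				  struct netdev_phys_item_id *ppid)
{
	struct foo_port *port = netdev_priv(dev);

	ppid->id_len = sizeof(port->switch_id);
	memcpy(ppid->id, &port->switch_id, ppid->id_len);

	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	/* ... other ndo_* callbacks ... */
	.ndo_get_port_parent_id = foo_get_port_parent_id,
};
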
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst
index dc2ddc345044..fbb9297e6360 100644
--- a/Documentation/process/applying-patches.rst
+++ b/Documentation/process/applying-patches.rst
@@ -216,14 +216,14 @@ You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to
 generate a patch representing the differences between two patches and then
 apply the result.
 
-This will let you move from something like 4.7.2 to 4.7.3 in a single
+This will let you move from something like 5.7.2 to 5.7.3 in a single
 step. The -z flag to interdiff will even let you feed it patches in gzip or
 bzip2 compressed form directly without the use of zcat or bzcat or manual
 decompression.
 
-Here's how you'd go from 4.7.2 to 4.7.3 in a single step::
+Here's how you'd go from 5.7.2 to 5.7.3 in a single step::
 
-        interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1
+        interdiff -z ../patch-5.7.2.gz ../patch-5.7.3.gz | patch -p1
 
 Although interdiff may save you a step or two you are generally advised to
 do the additional steps since interdiff can get things wrong in some cases.
@@ -245,62 +245,67 @@ The patches are available at http://kernel.org/
 Most recent patches are linked from the front page, but they also have
 specific homes.
 
-The 4.x.y (-stable) and 4.x patches live at
+The 5.x.y (-stable) and 5.x patches live at
 
-        https://www.kernel.org/pub/linux/kernel/v4.x/
+        https://www.kernel.org/pub/linux/kernel/v5.x/
 
-The -rc patches live at
+The -rc patches are not stored on the webserver but are generated on
+demand from git tags such as
 
-        https://www.kernel.org/pub/linux/kernel/v4.x/testing/
+        https://git.kernel.org/torvalds/p/v5.1-rc1/v5.0
 
+The stable -rc patches live at
 
-The 4.x kernels
+        https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/
+
+
+The 5.x kernels
 ===============
 
 These are the base stable releases released by Linus. The highest numbered
 release is the most recent.
 
 If regressions or other serious flaws are found, then a -stable fix patch
-will be released (see below) on top of this base. Once a new 4.x base
+will be released (see below) on top of this base. Once a new 5.x base
 kernel is released, a patch is made available that is a delta between the
-previous 4.x kernel and the new one.
+previous 5.x kernel and the new one.
 
-To apply a patch moving from 4.6 to 4.7, you'd do the following (note
-that such patches do **NOT** apply on top of 4.x.y kernels but on top of the
-base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to
-first revert the 4.x.y patch).
+To apply a patch moving from 5.6 to 5.7, you'd do the following (note
+that such patches do **NOT** apply on top of 5.x.y kernels but on top of the
+base 5.x kernel -- if you need to move from 5.x.y to 5.x+1 you need to
+first revert the 5.x.y patch).
 
 Here are some examples::
 
-        # moving from 4.6 to 4.7
+        # moving from 5.6 to 5.7
 
-        $ cd ~/linux-4.6                 # change to kernel source dir
-        $ patch -p1 < ../patch-4.7       # apply the 4.7 patch
+        $ cd ~/linux-5.6                 # change to kernel source dir
+        $ patch -p1 < ../patch-5.7       # apply the 5.7 patch
         $ cd ..
-        $ mv linux-4.6 linux-4.7         # rename source dir
+        $ mv linux-5.6 linux-5.7         # rename source dir
 
-        # moving from 4.6.1 to 4.7
+        # moving from 5.6.1 to 5.7
 
-        $ cd ~/linux-4.6.1               # change to kernel source dir
-        $ patch -p1 -R < ../patch-4.6.1  # revert the 4.6.1 patch
-                                         # source dir is now 4.6
-        $ patch -p1 < ../patch-4.7       # apply new 4.7 patch
+        $ cd ~/linux-5.6.1               # change to kernel source dir
+        $ patch -p1 -R < ../patch-5.6.1  # revert the 5.6.1 patch
+                                         # source dir is now 5.6
+        $ patch -p1 < ../patch-5.7       # apply new 5.7 patch
         $ cd ..
-        $ mv linux-4.6.1 linux-4.7       # rename source dir
+        $ mv linux-5.6.1 linux-5.7       # rename source dir
 
 
-The 4.x.y kernels
+The 5.x.y kernels
 =================
 
 Kernels with 3-digit versions are -stable kernels. They contain small(ish)
 critical fixes for security problems or significant regressions discovered
-in a given 4.x kernel.
+in a given 5.x kernel.
 
 This is the recommended branch for users who want the most recent stable
 kernel and are not interested in helping test development/experimental
 versions.
 
-If no 4.x.y kernel is available, then the highest numbered 4.x kernel is
+If no 5.x.y kernel is available, then the highest numbered 5.x kernel is
 the current stable kernel.
 
 .. note::
@@ -308,23 +313,23 @@ the current stable kernel.
    The -stable team usually do make incremental patches available as well
    as patches against the latest mainline release, but I only cover the
    non-incremental ones below. The incremental ones can be found at
-   https://www.kernel.org/pub/linux/kernel/v4.x/incr/
+   https://www.kernel.org/pub/linux/kernel/v5.x/incr/
 
-These patches are not incremental, meaning that for example the 4.7.3
-patch does not apply on top of the 4.7.2 kernel source, but rather on top
-of the base 4.7 kernel source.
+These patches are not incremental, meaning that for example the 5.7.3
+patch does not apply on top of the 5.7.2 kernel source, but rather on top
+of the base 5.7 kernel source.
 
-So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel
-source you have to first back out the 4.7.2 patch (so you are left with a
-base 4.7 kernel source) and then apply the new 4.7.3 patch.
+So, in order to apply the 5.7.3 patch to your existing 5.7.2 kernel
+source you have to first back out the 5.7.2 patch (so you are left with a
+base 5.7 kernel source) and then apply the new 5.7.3 patch.
 
 Here's a small example::
 
-        $ cd ~/linux-4.7.2               # change to the kernel source dir
-        $ patch -p1 -R < ../patch-4.7.2  # revert the 4.7.2 patch
-        $ patch -p1 < ../patch-4.7.3     # apply the new 4.7.3 patch
+        $ cd ~/linux-5.7.2               # change to the kernel source dir
+        $ patch -p1 -R < ../patch-5.7.2  # revert the 5.7.2 patch
+        $ patch -p1 < ../patch-5.7.3     # apply the new 5.7.3 patch
         $ cd ..
-        $ mv linux-4.7.2 linux-4.7.3     # rename the kernel source dir
+        $ mv linux-5.7.2 linux-5.7.3     # rename the kernel source dir
 
 The -rc kernels
 ===============
@@ -343,38 +348,38 @@ This is a good branch to run for people who want to help out testing
 development kernels but do not want to run some of the really experimental
 stuff (such people should see the sections about -next and -mm kernels below).
 
-The -rc patches are not incremental, they apply to a base 4.x kernel, just
-like the 4.x.y patches described above. The kernel version before the -rcN
+The -rc patches are not incremental, they apply to a base 5.x kernel, just
+like the 5.x.y patches described above. The kernel version before the -rcN
 suffix denotes the version of the kernel that this -rc kernel will eventually
 turn into.
 
-So, 4.8-rc5 means that this is the fifth release candidate for the 4.8
-kernel and the patch should be applied on top of the 4.7 kernel source.
+So, 5.8-rc5 means that this is the fifth release candidate for the 5.8
+kernel and the patch should be applied on top of the 5.7 kernel source.
 
 Here are 3 examples of how to apply these patches::
 
-        # first an example of moving from 4.7 to 4.8-rc3
+        # first an example of moving from 5.7 to 5.8-rc3
 
-        $ cd ~/linux-4.7                  # change to the 4.7 source dir
-        $ patch -p1 < ../patch-4.8-rc3    # apply the 4.8-rc3 patch
+        $ cd ~/linux-5.7                  # change to the 5.7 source dir
+        $ patch -p1 < ../patch-5.8-rc3    # apply the 5.8-rc3 patch
         $ cd ..
-        $ mv linux-4.7 linux-4.8-rc3      # rename the source dir
+        $ mv linux-5.7 linux-5.8-rc3      # rename the source dir
 
-        # now let's move from 4.8-rc3 to 4.8-rc5
+        # now let's move from 5.8-rc3 to 5.8-rc5
 
-        $ cd ~/linux-4.8-rc3              # change to the 4.8-rc3 dir
-        $ patch -p1 -R < ../patch-4.8-rc3 # revert the 4.8-rc3 patch
-        $ patch -p1 < ../patch-4.8-rc5    # apply the new 4.8-rc5 patch
+        $ cd ~/linux-5.8-rc3              # change to the 5.8-rc3 dir
+        $ patch -p1 -R < ../patch-5.8-rc3 # revert the 5.8-rc3 patch
+        $ patch -p1 < ../patch-5.8-rc5    # apply the new 5.8-rc5 patch
         $ cd ..
-        $ mv linux-4.8-rc3 linux-4.8-rc5  # rename the source dir
+        $ mv linux-5.8-rc3 linux-5.8-rc5  # rename the source dir
 
-        # finally let's try and move from 4.7.3 to 4.8-rc5
+        # finally let's try and move from 5.7.3 to 5.8-rc5
 
-        $ cd ~/linux-4.7.3                # change to the kernel source dir
-        $ patch -p1 -R < ../patch-4.7.3   # revert the 4.7.3 patch
-        $ patch -p1 < ../patch-4.8-rc5    # apply new 4.8-rc5 patch
+        $ cd ~/linux-5.7.3                # change to the kernel source dir
+        $ patch -p1 -R < ../patch-5.7.3   # revert the 5.7.3 patch
+        $ patch -p1 < ../patch-5.8-rc5    # apply new 5.8-rc5 patch
         $ cd ..
-        $ mv linux-4.7.3 linux-4.8-rc5    # rename the kernel source dir
+        $ mv linux-5.7.3 linux-5.8-rc5    # rename the kernel source dir
 
 
 The -mm patches and the linux-next tree
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 58649bd4fcfc..ebc679bcb2dc 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -80,7 +80,9 @@ nonzero when shrink_dcache_pages() has been called and the
 dcache isn't pruned yet.
 
 nr_negative shows the number of unused dentries that are also
-negative dentries which do not mapped to actual files.
+negative dentries which do not map to any files. Instead,
+they help speeding up rejection of non-existing files provided
+by the users.
 
 ==============================================================
 
diff --git a/Documentation/translations/it_IT/admin-guide/README.rst b/Documentation/translations/it_IT/admin-guide/README.rst
index 80f5ffc94a9e..b37166817842 100644
--- a/Documentation/translations/it_IT/admin-guide/README.rst
+++ b/Documentation/translations/it_IT/admin-guide/README.rst
@@ -4,7 +4,7 @@
 
 .. _it_readme:
 
-Rilascio del kernel Linux 4.x <http://kernel.org/>
+Rilascio del kernel Linux 5.x <http://kernel.org/>
 ===================================================
 
 .. warning::
diff --git a/MAINTAINERS b/MAINTAINERS
index 9919840d54cd..dce5c099f43c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -409,8 +409,7 @@ F: drivers/platform/x86/wmi.c
 F: include/uapi/linux/wmi.h
 
 AD1889 ALSA SOUND DRIVER
-M: Thibaut Varene <T-Bone@parisc-linux.org>
-W: http://wiki.parisc-linux.org/AD1889
+W: https://parisc.wiki.kernel.org/index.php/AD1889
 L: linux-parisc@vger.kernel.org
 S: Maintained
 F: sound/pci/ad1889.*
@@ -2852,7 +2851,7 @@ R: Martin KaFai Lau <kafai@fb.com>
2852R: Song Liu <songliubraving@fb.com> 2851R: Song Liu <songliubraving@fb.com>
2853R: Yonghong Song <yhs@fb.com> 2852R: Yonghong Song <yhs@fb.com>
2854L: netdev@vger.kernel.org 2853L: netdev@vger.kernel.org
2855L: linux-kernel@vger.kernel.org 2854L: bpf@vger.kernel.org
2856T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git 2855T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
2857T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git 2856T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
2858Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 2857Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
@@ -2882,6 +2881,7 @@ N: bpf
2882BPF JIT for ARM 2881BPF JIT for ARM
2883M: Shubham Bansal <illusionist.neo@gmail.com> 2882M: Shubham Bansal <illusionist.neo@gmail.com>
2884L: netdev@vger.kernel.org 2883L: netdev@vger.kernel.org
2884L: bpf@vger.kernel.org
2885S: Maintained 2885S: Maintained
2886F: arch/arm/net/ 2886F: arch/arm/net/
2887 2887
@@ -2890,18 +2890,21 @@ M: Daniel Borkmann <daniel@iogearbox.net>
2890M: Alexei Starovoitov <ast@kernel.org> 2890M: Alexei Starovoitov <ast@kernel.org>
2891M: Zi Shen Lim <zlim.lnx@gmail.com> 2891M: Zi Shen Lim <zlim.lnx@gmail.com>
2892L: netdev@vger.kernel.org 2892L: netdev@vger.kernel.org
2893L: bpf@vger.kernel.org
2893S: Supported 2894S: Supported
2894F: arch/arm64/net/ 2895F: arch/arm64/net/
2895 2896
2896BPF JIT for MIPS (32-BIT AND 64-BIT) 2897BPF JIT for MIPS (32-BIT AND 64-BIT)
2897M: Paul Burton <paul.burton@mips.com> 2898M: Paul Burton <paul.burton@mips.com>
2898L: netdev@vger.kernel.org 2899L: netdev@vger.kernel.org
2900L: bpf@vger.kernel.org
2899S: Maintained 2901S: Maintained
2900F: arch/mips/net/ 2902F: arch/mips/net/
2901 2903
2902BPF JIT for NFP NICs 2904BPF JIT for NFP NICs
2903M: Jakub Kicinski <jakub.kicinski@netronome.com> 2905M: Jakub Kicinski <jakub.kicinski@netronome.com>
2904L: netdev@vger.kernel.org 2906L: netdev@vger.kernel.org
2907L: bpf@vger.kernel.org
2905S: Supported 2908S: Supported
2906F: drivers/net/ethernet/netronome/nfp/bpf/ 2909F: drivers/net/ethernet/netronome/nfp/bpf/
2907 2910
@@ -2909,6 +2912,7 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT)
2909M: Naveen N. Rao <naveen.n.rao@linux.ibm.com> 2912M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
2910M: Sandipan Das <sandipan@linux.ibm.com> 2913M: Sandipan Das <sandipan@linux.ibm.com>
2911L: netdev@vger.kernel.org 2914L: netdev@vger.kernel.org
2915L: bpf@vger.kernel.org
2912S: Maintained 2916S: Maintained
2913F: arch/powerpc/net/ 2917F: arch/powerpc/net/
2914 2918
@@ -2916,6 +2920,7 @@ BPF JIT for S390
2916M: Martin Schwidefsky <schwidefsky@de.ibm.com> 2920M: Martin Schwidefsky <schwidefsky@de.ibm.com>
2917M: Heiko Carstens <heiko.carstens@de.ibm.com> 2921M: Heiko Carstens <heiko.carstens@de.ibm.com>
2918L: netdev@vger.kernel.org 2922L: netdev@vger.kernel.org
2923L: bpf@vger.kernel.org
2919S: Maintained 2924S: Maintained
2920F: arch/s390/net/ 2925F: arch/s390/net/
2921X: arch/s390/net/pnet.c 2926X: arch/s390/net/pnet.c
@@ -2923,12 +2928,14 @@ X: arch/s390/net/pnet.c
2923BPF JIT for SPARC (32-BIT AND 64-BIT) 2928BPF JIT for SPARC (32-BIT AND 64-BIT)
2924M: David S. Miller <davem@davemloft.net> 2929M: David S. Miller <davem@davemloft.net>
2925L: netdev@vger.kernel.org 2930L: netdev@vger.kernel.org
2931L: bpf@vger.kernel.org
2926S: Maintained 2932S: Maintained
2927F: arch/sparc/net/ 2933F: arch/sparc/net/
2928 2934
2929BPF JIT for X86 32-BIT 2935BPF JIT for X86 32-BIT
2930M: Wang YanQing <udknight@gmail.com> 2936M: Wang YanQing <udknight@gmail.com>
2931L: netdev@vger.kernel.org 2937L: netdev@vger.kernel.org
2938L: bpf@vger.kernel.org
2932S: Maintained 2939S: Maintained
2933F: arch/x86/net/bpf_jit_comp32.c 2940F: arch/x86/net/bpf_jit_comp32.c
2934 2941
@@ -2936,6 +2943,7 @@ BPF JIT for X86 64-BIT
2936M: Alexei Starovoitov <ast@kernel.org> 2943M: Alexei Starovoitov <ast@kernel.org>
2937M: Daniel Borkmann <daniel@iogearbox.net> 2944M: Daniel Borkmann <daniel@iogearbox.net>
2938L: netdev@vger.kernel.org 2945L: netdev@vger.kernel.org
2946L: bpf@vger.kernel.org
2939S: Supported 2947S: Supported
2940F: arch/x86/net/ 2948F: arch/x86/net/
2941X: arch/x86/net/bpf_jit_comp32.c 2949X: arch/x86/net/bpf_jit_comp32.c
@@ -3390,9 +3398,8 @@ F: Documentation/media/v4l-drivers/cafe_ccic*
3390F: drivers/media/platform/marvell-ccic/ 3398F: drivers/media/platform/marvell-ccic/
3391 3399
3392CAIF NETWORK LAYER 3400CAIF NETWORK LAYER
3393M: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
3394L: netdev@vger.kernel.org 3401L: netdev@vger.kernel.org
3395S: Supported 3402S: Orphan
3396F: Documentation/networking/caif/ 3403F: Documentation/networking/caif/
3397F: drivers/net/caif/ 3404F: drivers/net/caif/
3398F: include/uapi/linux/caif/ 3405F: include/uapi/linux/caif/
@@ -6151,7 +6158,7 @@ FREESCALE SOC SOUND DRIVERS
6151M: Timur Tabi <timur@kernel.org> 6158M: Timur Tabi <timur@kernel.org>
6152M: Nicolin Chen <nicoleotsuka@gmail.com> 6159M: Nicolin Chen <nicoleotsuka@gmail.com>
6153M: Xiubo Li <Xiubo.Lee@gmail.com> 6160M: Xiubo Li <Xiubo.Lee@gmail.com>
6154R: Fabio Estevam <fabio.estevam@nxp.com> 6161R: Fabio Estevam <festevam@gmail.com>
6155L: alsa-devel@alsa-project.org (moderated for non-subscribers) 6162L: alsa-devel@alsa-project.org (moderated for non-subscribers)
6156L: linuxppc-dev@lists.ozlabs.org 6163L: linuxppc-dev@lists.ozlabs.org
6157S: Maintained 6164S: Maintained
@@ -8487,6 +8494,7 @@ L7 BPF FRAMEWORK
8487M: John Fastabend <john.fastabend@gmail.com> 8494M: John Fastabend <john.fastabend@gmail.com>
8488M: Daniel Borkmann <daniel@iogearbox.net> 8495M: Daniel Borkmann <daniel@iogearbox.net>
8489L: netdev@vger.kernel.org 8496L: netdev@vger.kernel.org
8497L: bpf@vger.kernel.org
8490S: Maintained 8498S: Maintained
8491F: include/linux/skmsg.h 8499F: include/linux/skmsg.h
8492F: net/core/skmsg.c 8500F: net/core/skmsg.c
@@ -10898,7 +10906,7 @@ F: include/linux/nvmem-consumer.h
10898F: include/linux/nvmem-provider.h 10906F: include/linux/nvmem-provider.h
10899 10907
10900NXP SGTL5000 DRIVER 10908NXP SGTL5000 DRIVER
10901M: Fabio Estevam <fabio.estevam@nxp.com> 10909M: Fabio Estevam <festevam@gmail.com>
10902L: alsa-devel@alsa-project.org (moderated for non-subscribers) 10910L: alsa-devel@alsa-project.org (moderated for non-subscribers)
10903S: Maintained 10911S: Maintained
10904F: Documentation/devicetree/bindings/sound/sgtl5000.txt 10912F: Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -11488,7 +11496,7 @@ F: Documentation/blockdev/paride.txt
11488F: drivers/block/paride/ 11496F: drivers/block/paride/
11489 11497
11490PARISC ARCHITECTURE 11498PARISC ARCHITECTURE
11491M: "James E.J. Bottomley" <jejb@parisc-linux.org> 11499M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
11492M: Helge Deller <deller@gmx.de> 11500M: Helge Deller <deller@gmx.de>
11493L: linux-parisc@vger.kernel.org 11501L: linux-parisc@vger.kernel.org
11494W: http://www.parisc-linux.org/ 11502W: http://www.parisc-linux.org/
@@ -16714,6 +16722,7 @@ M: Jesper Dangaard Brouer <hawk@kernel.org>
16714M: John Fastabend <john.fastabend@gmail.com> 16722M: John Fastabend <john.fastabend@gmail.com>
16715L: netdev@vger.kernel.org 16723L: netdev@vger.kernel.org
16716L: xdp-newbies@vger.kernel.org 16724L: xdp-newbies@vger.kernel.org
16725L: bpf@vger.kernel.org
16717S: Supported 16726S: Supported
16718F: net/core/xdp.c 16727F: net/core/xdp.c
16719F: include/net/xdp.h 16728F: include/net/xdp.h
@@ -16727,6 +16736,7 @@ XDP SOCKETS (AF_XDP)
16727M: Björn Töpel <bjorn.topel@intel.com> 16736M: Björn Töpel <bjorn.topel@intel.com>
16728M: Magnus Karlsson <magnus.karlsson@intel.com> 16737M: Magnus Karlsson <magnus.karlsson@intel.com>
16729L: netdev@vger.kernel.org 16738L: netdev@vger.kernel.org
16739L: bpf@vger.kernel.org
16730S: Maintained 16740S: Maintained
16731F: kernel/bpf/xskmap.c 16741F: kernel/bpf/xskmap.c
16732F: net/xdp/ 16742F: net/xdp/
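The L: bpf@vger.kernel.org lines added above take effect through scripts/get_maintainer.pl, which is what submitters run to find the right recipients. A hedged sketch from the top of a kernel tree; the patch filename is hypothetical:

$ ./scripts/get_maintainer.pl -f net/xdp/xsk.c         # file lookup; the XDP entries should now list bpf@vger.kernel.org
$ ./scripts/get_maintainer.pl 0001-fix-xsk-poll.patch  # or run it against a patch before sending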
diff --git a/Makefile b/Makefile
index 86cf35d1d79d..ac5ac28a24e9 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 5 2VERSION = 5
3PATCHLEVEL = 0 3PATCHLEVEL = 0
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc6 5EXTRAVERSION = -rc8
6NAME = Shy Crocodile 6NAME = Shy Crocodile
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
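The EXTRAVERSION bump is what moves the reported release string from 5.0.0-rc6 to 5.0.0-rc8. A quick sketch of confirming the string without building anything, using the kernel's own make targets:

$ make -s kernelversion    # prints VERSION.PATCHLEVEL.SUBLEVEL plus EXTRAVERSION, i.e. 5.0.0-rc8 after this change
$ make -s kernelrelease    # same string plus any local version suffix (requires a configured tree)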
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 4d17cacd1462..432402c8e47f 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -56,15 +56,15 @@
56 56
57#elif defined(CONFIG_ALPHA_DP264) || \ 57#elif defined(CONFIG_ALPHA_DP264) || \
58 defined(CONFIG_ALPHA_LYNX) || \ 58 defined(CONFIG_ALPHA_LYNX) || \
59 defined(CONFIG_ALPHA_SHARK) || \ 59 defined(CONFIG_ALPHA_SHARK)
60 defined(CONFIG_ALPHA_EIGER)
61# define NR_IRQS 64 60# define NR_IRQS 64
62 61
63#elif defined(CONFIG_ALPHA_TITAN) 62#elif defined(CONFIG_ALPHA_TITAN)
64#define NR_IRQS 80 63#define NR_IRQS 80
65 64
66#elif defined(CONFIG_ALPHA_RAWHIDE) || \ 65#elif defined(CONFIG_ALPHA_RAWHIDE) || \
67 defined(CONFIG_ALPHA_TAKARA) 66 defined(CONFIG_ALPHA_TAKARA) || \
67 defined(CONFIG_ALPHA_EIGER)
68# define NR_IRQS 128 68# define NR_IRQS 128
69 69
70#elif defined(CONFIG_ALPHA_WILDFIRE) 70#elif defined(CONFIG_ALPHA_WILDFIRE)
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index d73dc473fbb9..188fc9256baf 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
78/* Macro for exception fixup code to access integer registers. */ 78/* Macro for exception fixup code to access integer registers. */
79#define dpf_reg(r) \ 79#define dpf_reg(r) \
80 (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ 80 (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
81 (r) <= 18 ? (r)+8 : (r)-10]) 81 (r) <= 18 ? (r)+10 : (r)-10])
82 82
83asmlinkage void 83asmlinkage void
84do_page_fault(unsigned long address, unsigned long mmcsr, 84do_page_fault(unsigned long address, unsigned long mmcsr,
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 376366a7db81..d750b302d5ab 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -191,7 +191,6 @@ config NR_CPUS
191 191
192config ARC_SMP_HALT_ON_RESET 192config ARC_SMP_HALT_ON_RESET
193 bool "Enable Halt-on-reset boot mode" 193 bool "Enable Halt-on-reset boot mode"
194 default y if ARC_UBOOT_SUPPORT
195 help 194 help
196 In SMP configuration cores can be configured as Halt-on-reset 195 In SMP configuration cores can be configured as Halt-on-reset
197 or they could all start at same time. For Halt-on-reset, non 196 or they could all start at same time. For Halt-on-reset, non
@@ -407,6 +406,14 @@ config ARC_HAS_ACCL_REGS
407 (also referred to as r58:r59). These can also be used by gcc as GPR so 406 (also referred to as r58:r59). These can also be used by gcc as GPR so
408 kernel needs to save/restore per process 407 kernel needs to save/restore per process
409 408
409config ARC_IRQ_NO_AUTOSAVE
410 bool "Disable hardware autosave regfile on interrupts"
411 default n
412 help
413 On HS cores, taken interrupt auto saves the regfile on stack.
414 This is programmable and can be optionally disabled in which case
415 software INTERRUPT_PROLOGUE/EPILGUE do the needed work
416
410endif # ISA_ARCV2 417endif # ISA_ARCV2
411 418
412endmenu # "ARC CPU Configuration" 419endmenu # "ARC CPU Configuration"
@@ -515,17 +522,6 @@ config ARC_DBG_TLB_PARANOIA
515 522
516endif 523endif
517 524
518config ARC_UBOOT_SUPPORT
519 bool "Support uboot arg Handling"
520 help
521 ARC Linux by default checks for uboot provided args as pointers to
522 external cmdline or DTB. This however breaks in absence of uboot,
523 when booting from Metaware debugger directly, as the registers are
524 not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
525 registers look like uboot args to kernel which then chokes.
526 So only enable the uboot arg checking/processing if users are sure
527 of uboot being in play.
528
529config ARC_BUILTIN_DTB_NAME 525config ARC_BUILTIN_DTB_NAME
530 string "Built in DTB" 526 string "Built in DTB"
531 help 527 help
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 6e84060e7c90..621f59407d76 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5
31# CONFIG_ARC_HAS_LLSC is not set 31# CONFIG_ARC_HAS_LLSC is not set
32CONFIG_ARC_KVADDR_SIZE=402 32CONFIG_ARC_KVADDR_SIZE=402
33CONFIG_ARC_EMUL_UNALIGNED=y 33CONFIG_ARC_EMUL_UNALIGNED=y
34CONFIG_ARC_UBOOT_SUPPORT=y
35CONFIG_PREEMPT=y 34CONFIG_PREEMPT=y
36CONFIG_NET=y 35CONFIG_NET=y
37CONFIG_UNIX=y 36CONFIG_UNIX=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index 1e59a2e9c602..e447ace6fa1c 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y
13CONFIG_ARC_PLAT_AXS10X=y 13CONFIG_ARC_PLAT_AXS10X=y
14CONFIG_AXS103=y 14CONFIG_AXS103=y
15CONFIG_ISA_ARCV2=y 15CONFIG_ISA_ARCV2=y
16CONFIG_ARC_UBOOT_SUPPORT=y
17CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38" 16CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
18CONFIG_PREEMPT=y 17CONFIG_PREEMPT=y
19CONFIG_NET=y 18CONFIG_NET=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index b5c3f6c54b03..c82cdb10aaf4 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -15,8 +15,6 @@ CONFIG_AXS103=y
15CONFIG_ISA_ARCV2=y 15CONFIG_ISA_ARCV2=y
16CONFIG_SMP=y 16CONFIG_SMP=y
17# CONFIG_ARC_TIMERS_64BIT is not set 17# CONFIG_ARC_TIMERS_64BIT is not set
18# CONFIG_ARC_SMP_HALT_ON_RESET is not set
19CONFIG_ARC_UBOOT_SUPPORT=y
20CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" 18CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
21CONFIG_PREEMPT=y 19CONFIG_PREEMPT=y
22CONFIG_NET=y 20CONFIG_NET=y
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index f1b86cef0905..a27eafdc8260 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -151,6 +151,14 @@ struct bcr_isa_arcv2 {
151#endif 151#endif
152}; 152};
153 153
154struct bcr_uarch_build_arcv2 {
155#ifdef CONFIG_CPU_BIG_ENDIAN
156 unsigned int pad:8, prod:8, maj:8, min:8;
157#else
158 unsigned int min:8, maj:8, prod:8, pad:8;
159#endif
160};
161
154struct bcr_mpy { 162struct bcr_mpy {
155#ifdef CONFIG_CPU_BIG_ENDIAN 163#ifdef CONFIG_CPU_BIG_ENDIAN
156 unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; 164 unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index f393b663413e..2ad77fb43639 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -52,6 +52,17 @@
52#define cache_line_size() SMP_CACHE_BYTES 52#define cache_line_size() SMP_CACHE_BYTES
53#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES 53#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
54 54
55/*
56 * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
57 * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit
58 * alignment for any atomic64_t embedded in buffer.
59 * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
60 * value of 4 (and not 8) in ARC ABI.
61 */
62#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
63#define ARCH_SLAB_MINALIGN 8
64#endif
65
55extern void arc_cache_init(void); 66extern void arc_cache_init(void);
56extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); 67extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
57extern void read_decode_cache_bcr(void); 68extern void read_decode_cache_bcr(void);
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 309f4e6721b3..225e7df2d8ed 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -17,6 +17,33 @@
17 ; 17 ;
18 ; Now manually save: r12, sp, fp, gp, r25 18 ; Now manually save: r12, sp, fp, gp, r25
19 19
20#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
21.ifnc \called_from, exception
22 st.as r9, [sp, -10] ; save r9 in it's final stack slot
23 sub sp, sp, 12 ; skip JLI, LDI, EI
24
25 PUSH lp_count
26 PUSHAX lp_start
27 PUSHAX lp_end
28 PUSH blink
29
30 PUSH r11
31 PUSH r10
32
33 sub sp, sp, 4 ; skip r9
34
35 PUSH r8
36 PUSH r7
37 PUSH r6
38 PUSH r5
39 PUSH r4
40 PUSH r3
41 PUSH r2
42 PUSH r1
43 PUSH r0
44.endif
45#endif
46
20#ifdef CONFIG_ARC_HAS_ACCL_REGS 47#ifdef CONFIG_ARC_HAS_ACCL_REGS
21 PUSH r59 48 PUSH r59
22 PUSH r58 49 PUSH r58
@@ -86,6 +113,33 @@
86 POP r59 113 POP r59
87#endif 114#endif
88 115
116#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
117.ifnc \called_from, exception
118 POP r0
119 POP r1
120 POP r2
121 POP r3
122 POP r4
123 POP r5
124 POP r6
125 POP r7
126 POP r8
127 POP r9
128 POP r10
129 POP r11
130
131 POP blink
132 POPAX lp_end
133 POPAX lp_start
134
135 POP r9
136 mov lp_count, r9
137
138 add sp, sp, 12 ; skip JLI, LDI, EI
139 ld.as r9, [sp, -10] ; reload r9 which got clobbered
140.endif
141#endif
142
89.endm 143.endm
90 144
91/*------------------------------------------------------------------------*/ 145/*------------------------------------------------------------------------*/
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index c9173c02081c..eabc3efa6c6d 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
207 */ 207 */
208 "=&r" (tmp), "+r" (to), "+r" (from) 208 "=&r" (tmp), "+r" (to), "+r" (from)
209 : 209 :
210 : "lp_count", "lp_start", "lp_end", "memory"); 210 : "lp_count", "memory");
211 211
212 return n; 212 return n;
213 } 213 }
@@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
433 */ 433 */
434 "=&r" (tmp), "+r" (to), "+r" (from) 434 "=&r" (tmp), "+r" (to), "+r" (from)
435 : 435 :
436 : "lp_count", "lp_start", "lp_end", "memory"); 436 : "lp_count", "memory");
437 437
438 return n; 438 return n;
439 } 439 }
@@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
653 " .previous \n" 653 " .previous \n"
654 : "+r"(d_char), "+r"(res) 654 : "+r"(d_char), "+r"(res)
655 : "i"(0) 655 : "i"(0)
656 : "lp_count", "lp_start", "lp_end", "memory"); 656 : "lp_count", "memory");
657 657
658 return res; 658 return res;
659} 659}
@@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
686 " .previous \n" 686 " .previous \n"
687 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) 687 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
688 : "g"(-EFAULT), "r"(count) 688 : "g"(-EFAULT), "r"(count)
689 : "lp_count", "lp_start", "lp_end", "memory"); 689 : "lp_count", "memory");
690 690
691 return res; 691 return res;
692} 692}
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cc558a25b8fa..562089d62d9d 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -209,7 +209,9 @@ restore_regs:
209;####### Return from Intr ####### 209;####### Return from Intr #######
210 210
211debug_marker_l1: 211debug_marker_l1:
212 bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot 212 ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
213 btst r0, STATUS_DE_BIT ; Z flag set if bit clear
214 bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set
213 215
214.Lisr_ret_fast_path: 216.Lisr_ret_fast_path:
215 ; Handle special case #1: (Entry via Exception, Return via IRQ) 217 ; Handle special case #1: (Entry via Exception, Return via IRQ)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25a15cc..30e090625916 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
17#include <asm/entry.h> 17#include <asm/entry.h>
18#include <asm/arcregs.h> 18#include <asm/arcregs.h>
19#include <asm/cache.h> 19#include <asm/cache.h>
20#include <asm/irqflags.h>
20 21
21.macro CPU_EARLY_SETUP 22.macro CPU_EARLY_SETUP
22 23
@@ -47,6 +48,15 @@
47 sr r5, [ARC_REG_DC_CTRL] 48 sr r5, [ARC_REG_DC_CTRL]
48 49
491: 501:
51
52#ifdef CONFIG_ISA_ARCV2
53 ; Unaligned access is disabled at reset, so re-enable early as
54 ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
55 ; by default
56 lr r5, [status32]
57 bset r5, r5, STATUS_AD_BIT
58 kflag r5
59#endif
50.endm 60.endm
51 61
52 .section .init.text, "ax",@progbits 62 .section .init.text, "ax",@progbits
@@ -90,15 +100,13 @@ ENTRY(stext)
90 st.ab 0, [r5, 4] 100 st.ab 0, [r5, 4]
911: 1011:
92 102
93#ifdef CONFIG_ARC_UBOOT_SUPPORT
94 ; Uboot - kernel ABI 103 ; Uboot - kernel ABI
95 ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 104 ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
96 ; r1 = magic number (board identity, unused as of now 105 ; r1 = magic number (always zero as of now)
97 ; r2 = pointer to uboot provided cmdline or external DTB in mem 106 ; r2 = pointer to uboot provided cmdline or external DTB in mem
98 ; These are handled later in setup_arch() 107 ; These are handled later in handle_uboot_args()
99 st r0, [@uboot_tag] 108 st r0, [@uboot_tag]
100 st r2, [@uboot_arg] 109 st r2, [@uboot_arg]
101#endif
102 110
103 ; setup "current" tsk and optionally cache it in dedicated r25 111 ; setup "current" tsk and optionally cache it in dedicated r25
104 mov r9, @init_task 112 mov r9, @init_task
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 067ea362fb3e..cf18b3e5a934 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -49,11 +49,13 @@ void arc_init_IRQ(void)
49 49
50 *(unsigned int *)&ictrl = 0; 50 *(unsigned int *)&ictrl = 0;
51 51
52#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
52 ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ 53 ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */
53 ictrl.save_blink = 1; 54 ictrl.save_blink = 1;
54 ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ 55 ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */
55 ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ 56 ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */
56 ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ 57 ictrl.save_idx_regs = 1; /* JLI, LDI, EI */
58#endif
57 59
58 WRITE_AUX(AUX_IRQ_CTRL, ictrl); 60 WRITE_AUX(AUX_IRQ_CTRL, ictrl);
59 61
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index feb90093e6b1..7b2340996cf8 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -199,20 +199,36 @@ static void read_arc_build_cfg_regs(void)
199 cpu->bpu.ret_stk = 4 << bpu.rse; 199 cpu->bpu.ret_stk = 4 << bpu.rse;
200 200
201 if (cpu->core.family >= 0x54) { 201 if (cpu->core.family >= 0x54) {
202 unsigned int exec_ctrl;
203 202
204 READ_BCR(AUX_EXEC_CTRL, exec_ctrl); 203 struct bcr_uarch_build_arcv2 uarch;
205 cpu->extn.dual_enb = !(exec_ctrl & 1);
206 204
207 /* dual issue always present for this core */ 205 /*
208 cpu->extn.dual = 1; 206 * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
207 * dual issue only (HS4x). But next uarch rev (1:0)
208 * allows it be configured for single issue (HS3x)
209 * Ensure we fiddle with dual issue only on HS4x
210 */
211 READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
212
213 if (uarch.prod == 4) {
214 unsigned int exec_ctrl;
215
216 /* dual issue hardware always present */
217 cpu->extn.dual = 1;
218
219 READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
220
221 /* dual issue hardware enabled ? */
222 cpu->extn.dual_enb = !(exec_ctrl & 1);
223
224 }
209 } 225 }
210 } 226 }
211 227
212 READ_BCR(ARC_REG_AP_BCR, ap); 228 READ_BCR(ARC_REG_AP_BCR, ap);
213 if (ap.ver) { 229 if (ap.ver) {
214 cpu->extn.ap_num = 2 << ap.num; 230 cpu->extn.ap_num = 2 << ap.num;
215 cpu->extn.ap_full = !!ap.min; 231 cpu->extn.ap_full = !ap.min;
216 } 232 }
217 233
218 READ_BCR(ARC_REG_SMART_BCR, bcr); 234 READ_BCR(ARC_REG_SMART_BCR, bcr);
@@ -462,43 +478,78 @@ void setup_processor(void)
462 arc_chk_core_config(); 478 arc_chk_core_config();
463} 479}
464 480
465static inline int is_kernel(unsigned long addr) 481static inline bool uboot_arg_invalid(unsigned long addr)
466{ 482{
467 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) 483 /*
468 return 1; 484 * Check that it is a untranslated address (although MMU is not enabled
469 return 0; 485 * yet, it being a high address ensures this is not by fluke)
486 */
487 if (addr < PAGE_OFFSET)
488 return true;
489
490 /* Check that address doesn't clobber resident kernel image */
491 return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
470} 492}
471 493
472void __init setup_arch(char **cmdline_p) 494#define IGNORE_ARGS "Ignore U-boot args: "
495
496/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
497#define UBOOT_TAG_NONE 0
498#define UBOOT_TAG_CMDLINE 1
499#define UBOOT_TAG_DTB 2
500
501void __init handle_uboot_args(void)
473{ 502{
474#ifdef CONFIG_ARC_UBOOT_SUPPORT 503 bool use_embedded_dtb = true;
475 /* make sure that uboot passed pointer to cmdline/dtb is valid */ 504 bool append_cmdline = false;
476 if (uboot_tag && is_kernel((unsigned long)uboot_arg)) 505
477 panic("Invalid uboot arg\n"); 506 /* check that we know this tag */
478 507 if (uboot_tag != UBOOT_TAG_NONE &&
479 /* See if u-boot passed an external Device Tree blob */ 508 uboot_tag != UBOOT_TAG_CMDLINE &&
480 machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */ 509 uboot_tag != UBOOT_TAG_DTB) {
481 if (!machine_desc) 510 pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
482#endif 511 goto ignore_uboot_args;
483 { 512 }
484 /* No, so try the embedded one */ 513
514 if (uboot_tag != UBOOT_TAG_NONE &&
515 uboot_arg_invalid((unsigned long)uboot_arg)) {
516 pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
517 goto ignore_uboot_args;
518 }
519
520 /* see if U-boot passed an external Device Tree blob */
521 if (uboot_tag == UBOOT_TAG_DTB) {
522 machine_desc = setup_machine_fdt((void *)uboot_arg);
523
524 /* external Device Tree blob is invalid - use embedded one */
525 use_embedded_dtb = !machine_desc;
526 }
527
528 if (uboot_tag == UBOOT_TAG_CMDLINE)
529 append_cmdline = true;
530
531ignore_uboot_args:
532
533 if (use_embedded_dtb) {
485 machine_desc = setup_machine_fdt(__dtb_start); 534 machine_desc = setup_machine_fdt(__dtb_start);
486 if (!machine_desc) 535 if (!machine_desc)
487 panic("Embedded DT invalid\n"); 536 panic("Embedded DT invalid\n");
537 }
488 538
489 /* 539 /*
490 * If we are here, it is established that @uboot_arg didn't 540 * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
491 * point to DT blob. Instead if u-boot says it is cmdline, 541 * append processing can only happen after.
492 * append to embedded DT cmdline. 542 */
493 * setup_machine_fdt() would have populated @boot_command_line 543 if (append_cmdline) {
494 */ 544 /* Ensure a whitespace between the 2 cmdlines */
495 if (uboot_tag == 1) { 545 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
496 /* Ensure a whitespace between the 2 cmdlines */ 546 strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
497 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
498 strlcat(boot_command_line, uboot_arg,
499 COMMAND_LINE_SIZE);
500 }
501 } 547 }
548}
549
550void __init setup_arch(char **cmdline_p)
551{
552 handle_uboot_args();
502 553
503 /* Save unparsed command line copy for /proc/cmdline */ 554 /* Save unparsed command line copy for /proc/cmdline */
504 *cmdline_p = boot_command_line; 555 *cmdline_p = boot_command_line;
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index d61044dd8b58..ea14b0bf3116 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -25,15 +25,11 @@
25#endif 25#endif
26 26
27#ifdef CONFIG_ARC_HAS_LL64 27#ifdef CONFIG_ARC_HAS_LL64
28# define PREFETCH_READ(RX) prefetch [RX, 56]
29# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
30# define LOADX(DST,RX) ldd.ab DST, [RX, 8] 28# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
31# define STOREX(SRC,RX) std.ab SRC, [RX, 8] 29# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
32# define ZOLSHFT 5 30# define ZOLSHFT 5
33# define ZOLAND 0x1F 31# define ZOLAND 0x1F
34#else 32#else
35# define PREFETCH_READ(RX) prefetch [RX, 28]
36# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
37# define LOADX(DST,RX) ld.ab DST, [RX, 4] 33# define LOADX(DST,RX) ld.ab DST, [RX, 4]
38# define STOREX(SRC,RX) st.ab SRC, [RX, 4] 34# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
39# define ZOLSHFT 4 35# define ZOLSHFT 4
@@ -41,8 +37,6 @@
41#endif 37#endif
42 38
43ENTRY_CFI(memcpy) 39ENTRY_CFI(memcpy)
44 prefetch [r1] ; Prefetch the read location
45 prefetchw [r0] ; Prefetch the write location
46 mov.f 0, r2 40 mov.f 0, r2
47;;; if size is zero 41;;; if size is zero
48 jz.d [blink] 42 jz.d [blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
72 lpnz @.Lcopy32_64bytes 66 lpnz @.Lcopy32_64bytes
73 ;; LOOP START 67 ;; LOOP START
74 LOADX (r6, r1) 68 LOADX (r6, r1)
75 PREFETCH_READ (r1)
76 PREFETCH_WRITE (r3)
77 LOADX (r8, r1) 69 LOADX (r8, r1)
78 LOADX (r10, r1) 70 LOADX (r10, r1)
79 LOADX (r4, r1) 71 LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
117 lpnz @.Lcopy8bytes_1 109 lpnz @.Lcopy8bytes_1
118 ;; LOOP START 110 ;; LOOP START
119 ld.ab r6, [r1, 4] 111 ld.ab r6, [r1, 4]
120 prefetch [r1, 28] ;Prefetch the next read location
121 ld.ab r8, [r1,4] 112 ld.ab r8, [r1,4]
122 prefetchw [r3, 32] ;Prefetch the next write location
123 113
124 SHIFT_1 (r7, r6, 24) 114 SHIFT_1 (r7, r6, 24)
125 or r7, r7, r5 115 or r7, r7, r5
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
162 lpnz @.Lcopy8bytes_2 152 lpnz @.Lcopy8bytes_2
163 ;; LOOP START 153 ;; LOOP START
164 ld.ab r6, [r1, 4] 154 ld.ab r6, [r1, 4]
165 prefetch [r1, 28] ;Prefetch the next read location
166 ld.ab r8, [r1,4] 155 ld.ab r8, [r1,4]
167 prefetchw [r3, 32] ;Prefetch the next write location
168 156
169 SHIFT_1 (r7, r6, 16) 157 SHIFT_1 (r7, r6, 16)
170 or r7, r7, r5 158 or r7, r7, r5
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
204 lpnz @.Lcopy8bytes_3 192 lpnz @.Lcopy8bytes_3
205 ;; LOOP START 193 ;; LOOP START
206 ld.ab r6, [r1, 4] 194 ld.ab r6, [r1, 4]
207 prefetch [r1, 28] ;Prefetch the next read location
208 ld.ab r8, [r1,4] 195 ld.ab r8, [r1,4]
209 prefetchw [r3, 32] ;Prefetch the next write location
210 196
211 SHIFT_1 (r7, r6, 8) 197 SHIFT_1 (r7, r6, 8)
212 or r7, r7, r5 198 or r7, r7, r5
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index f25c085b9874..23e00216e5a5 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK
9 bool "ARC HS Development Kit SOC" 9 bool "ARC HS Development Kit SOC"
10 depends on ISA_ARCV2 10 depends on ISA_ARCV2
11 select ARC_HAS_ACCL_REGS 11 select ARC_HAS_ACCL_REGS
12 select ARC_IRQ_NO_AUTOSAVE
12 select CLK_HSDK 13 select CLK_HSDK
13 select RESET_HSDK 14 select RESET_HSDK
14 select HAVE_PCI 15 select HAVE_PCI
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 664e918e2624..26524b75970a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1400,6 +1400,7 @@ config NR_CPUS
1400config HOTPLUG_CPU 1400config HOTPLUG_CPU
1401 bool "Support for hot-pluggable CPUs" 1401 bool "Support for hot-pluggable CPUs"
1402 depends on SMP 1402 depends on SMP
1403 select GENERIC_IRQ_MIGRATION
1403 help 1404 help
1404 Say Y here to experiment with turning CPUs off and on. CPUs 1405 Say Y here to experiment with turning CPUs off and on. CPUs
1405 can be controlled through /sys/devices/system/cpu. 1406 can be controlled through /sys/devices/system/cpu.
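Selecting GENERIC_IRQ_MIGRATION means the generic kernel code now handles moving interrupts off a CPU taken down through the sysfs interface mentioned in the help text. A minimal sketch of exercising that path (the CPU number is arbitrary; needs root):

$ echo 0 > /sys/devices/system/cpu/cpu1/online   # offline CPU1; its IRQ affinities are migrated at this point
$ cat /proc/interrupts                           # per-CPU columns now cover only the remaining online CPUs
$ echo 1 > /sys/devices/system/cpu/cpu1/online   # bring the CPU back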
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index b67f5fee1469..dce5be5df97b 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -729,7 +729,7 @@
729 729
730&cpsw_emac0 { 730&cpsw_emac0 {
731 phy-handle = <&ethphy0>; 731 phy-handle = <&ethphy0>;
732 phy-mode = "rgmii-txid"; 732 phy-mode = "rgmii-id";
733}; 733};
734 734
735&tscadc { 735&tscadc {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 172c0224e7f6..b128998097ce 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -651,13 +651,13 @@
651 651
652&cpsw_emac0 { 652&cpsw_emac0 {
653 phy-handle = <&ethphy0>; 653 phy-handle = <&ethphy0>;
654 phy-mode = "rgmii-txid"; 654 phy-mode = "rgmii-id";
655 dual_emac_res_vlan = <1>; 655 dual_emac_res_vlan = <1>;
656}; 656};
657 657
658&cpsw_emac1 { 658&cpsw_emac1 {
659 phy-handle = <&ethphy1>; 659 phy-handle = <&ethphy1>;
660 phy-mode = "rgmii-txid"; 660 phy-mode = "rgmii-id";
661 dual_emac_res_vlan = <2>; 661 dual_emac_res_vlan = <2>;
662}; 662};
663 663
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index f3ac7483afed..5d04dc68cf57 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -144,30 +144,32 @@
144 status = "okay"; 144 status = "okay";
145 }; 145 };
146 146
147 nand@d0000 { 147 nand-controller@d0000 {
148 status = "okay"; 148 status = "okay";
149 label = "pxa3xx_nand-0";
150 num-cs = <1>;
151 marvell,nand-keep-config;
152 nand-on-flash-bbt;
153
154 partitions {
155 compatible = "fixed-partitions";
156 #address-cells = <1>;
157 #size-cells = <1>;
158
159 partition@0 {
160 label = "U-Boot";
161 reg = <0 0x800000>;
162 };
163 partition@800000 {
164 label = "Linux";
165 reg = <0x800000 0x800000>;
166 };
167 partition@1000000 {
168 label = "Filesystem";
169 reg = <0x1000000 0x3f000000>;
170 149
150 nand@0 {
151 reg = <0>;
152 label = "pxa3xx_nand-0";
153 nand-rb = <0>;
154 nand-on-flash-bbt;
155
156 partitions {
157 compatible = "fixed-partitions";
158 #address-cells = <1>;
159 #size-cells = <1>;
160
161 partition@0 {
162 label = "U-Boot";
163 reg = <0 0x800000>;
164 };
165 partition@800000 {
166 label = "Linux";
167 reg = <0x800000 0x800000>;
168 };
169 partition@1000000 {
170 label = "Filesystem";
171 reg = <0x1000000 0x3f000000>;
172 };
171 }; 173 };
172 }; 174 };
173 }; 175 };
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 1139e9469a83..b4cca507cf13 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -160,12 +160,15 @@
160 status = "okay"; 160 status = "okay";
161 }; 161 };
162 162
163 nand@d0000 { 163 nand-controller@d0000 {
164 status = "okay"; 164 status = "okay";
165 label = "pxa3xx_nand-0"; 165
166 num-cs = <1>; 166 nand@0 {
167 marvell,nand-keep-config; 167 reg = <0>;
168 nand-on-flash-bbt; 168 label = "pxa3xx_nand-0";
169 nand-rb = <0>;
170 nand-on-flash-bbt;
171 };
169 }; 172 };
170 }; 173 };
171 174
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
index bbbb38888bb8..87dcb502f72d 100644
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
@@ -81,49 +81,52 @@
81 81
82 }; 82 };
83 83
84 nand@d0000 { 84 nand-controller@d0000 {
85 status = "okay"; 85 status = "okay";
86 label = "pxa3xx_nand-0";
87 num-cs = <1>;
88 marvell,nand-keep-config;
89 nand-on-flash-bbt;
90
91 partitions {
92 compatible = "fixed-partitions";
93 #address-cells = <1>;
94 #size-cells = <1>;
95
96 partition@0 {
97 label = "u-boot";
98 reg = <0x00000000 0x000e0000>;
99 read-only;
100 };
101
102 partition@e0000 {
103 label = "u-boot-env";
104 reg = <0x000e0000 0x00020000>;
105 read-only;
106 };
107
108 partition@100000 {
109 label = "u-boot-env2";
110 reg = <0x00100000 0x00020000>;
111 read-only;
112 };
113
114 partition@120000 {
115 label = "zImage";
116 reg = <0x00120000 0x00400000>;
117 };
118
119 partition@520000 {
120 label = "initrd";
121 reg = <0x00520000 0x00400000>;
122 };
123 86
124 partition@e00000 { 87 nand@0 {
125 label = "boot"; 88 reg = <0>;
126 reg = <0x00e00000 0x3f200000>; 89 label = "pxa3xx_nand-0";
90 nand-rb = <0>;
91 nand-on-flash-bbt;
92
93 partitions {
94 compatible = "fixed-partitions";
95 #address-cells = <1>;
96 #size-cells = <1>;
97
98 partition@0 {
99 label = "u-boot";
100 reg = <0x00000000 0x000e0000>;
101 read-only;
102 };
103
104 partition@e0000 {
105 label = "u-boot-env";
106 reg = <0x000e0000 0x00020000>;
107 read-only;
108 };
109
110 partition@100000 {
111 label = "u-boot-env2";
112 reg = <0x00100000 0x00020000>;
113 read-only;
114 };
115
116 partition@120000 {
117 label = "zImage";
118 reg = <0x00120000 0x00400000>;
119 };
120
121 partition@520000 {
122 label = "initrd";
123 reg = <0x00520000 0x00400000>;
124 };
125
126 partition@e00000 {
127 label = "boot";
128 reg = <0x00e00000 0x3f200000>;
129 };
127 }; 130 };
128 }; 131 };
129 }; 132 };
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 04758a2a87f0..67d77eee9433 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -644,6 +644,17 @@
644 }; 644 };
645}; 645};
646 646
647/* Configure pwm clock source for timers 8 & 9 */
648&timer8 {
649 assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
650 assigned-clock-parents = <&sys_clkin_ck>;
651};
652
653&timer9 {
654 assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
655 assigned-clock-parents = <&sys_clkin_ck>;
656};
657
647/* 658/*
648 * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for 659 * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
649 * uart1 wakeirq. 660 * uart1 wakeirq.
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index bc853ebeda22..61a06f6add3c 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -317,7 +317,8 @@
317 317
318 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { 318 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
319 pinctrl-single,pins = < 319 pinctrl-single,pins = <
320 OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ 320 /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
321 OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
321 >; 322 >;
322 }; 323 };
323 324
@@ -385,7 +386,8 @@
385 386
386 palmas: palmas@48 { 387 palmas: palmas@48 {
387 compatible = "ti,palmas"; 388 compatible = "ti,palmas";
388 interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ 389 /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
390 interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
389 reg = <0x48>; 391 reg = <0x48>;
390 interrupt-controller; 392 interrupt-controller;
391 #interrupt-cells = <2>; 393 #interrupt-cells = <2>;
@@ -651,7 +653,8 @@
651 pinctrl-names = "default"; 653 pinctrl-names = "default";
652 pinctrl-0 = <&twl6040_pins>; 654 pinctrl-0 = <&twl6040_pins>;
653 655
654 interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ 656 /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
657 interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
655 658
656 /* audpwron gpio defined in the board specific dts */ 659 /* audpwron gpio defined in the board specific dts */
657 660
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 5e21fb430a65..e78d3718f145 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -181,6 +181,13 @@
181 OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ 181 OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */
182 >; 182 >;
183 }; 183 };
184
185 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
186 pinctrl-single,pins = <
187 /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
188 OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
189 >;
190 };
184}; 191};
185 192
186&omap5_pmx_core { 193&omap5_pmx_core {
@@ -414,8 +421,11 @@
414 421
415 palmas: palmas@48 { 422 palmas: palmas@48 {
416 compatible = "ti,palmas"; 423 compatible = "ti,palmas";
417 interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
418 reg = <0x48>; 424 reg = <0x48>;
425 pinctrl-0 = <&palmas_sys_nirq_pins>;
426 pinctrl-names = "default";
427 /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
428 interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
419 interrupt-controller; 429 interrupt-controller;
420 #interrupt-cells = <2>; 430 #interrupt-cells = <2>;
421 ti,system-power-controller; 431 ti,system-power-controller;
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 4acb501dd3f8..3ed49898f4b2 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -719,7 +719,6 @@
719 pm_qos = <&qos_lcdc0>, 719 pm_qos = <&qos_lcdc0>,
720 <&qos_lcdc1>, 720 <&qos_lcdc1>,
721 <&qos_cif0>, 721 <&qos_cif0>,
722 <&qos_cif1>,
723 <&qos_ipp>, 722 <&qos_ipp>,
724 <&qos_rga>; 723 <&qos_rga>;
725 }; 724 };
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index d5f11d6d987e..bc85b6a166c7 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -13,10 +13,25 @@
13 stdout-path = "serial0:115200n8"; 13 stdout-path = "serial0:115200n8";
14 }; 14 };
15 15
16 memory@80000000 { 16 /*
17 * Note that recent version of the device tree compiler (starting with
18 * version 1.4.2) warn about this node containing a reg property, but
19 * missing a unit-address. However, the bootloader on these Chromebook
20 * devices relies on the full name of this node to be exactly /memory.
21 * Adding the unit-address causes the bootloader to create a /memory
22 * node and write the memory bank configuration to that node, which in
23 * turn leads the kernel to believe that the device has 2 GiB of
24 * memory instead of the amount detected by the bootloader.
25 *
26 * The name of this node is effectively ABI and must not be changed.
27 */
28 memory {
29 device_type = "memory";
17 reg = <0x0 0x80000000 0x0 0x80000000>; 30 reg = <0x0 0x80000000 0x0 0x80000000>;
18 }; 31 };
19 32
33 /delete-node/ memory@80000000;
34
20 host1x@50000000 { 35 host1x@50000000 {
21 hdmi@54280000 { 36 hdmi@54280000 {
22 status = "okay"; 37 status = "okay";
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index c883fcbe93b6..46d41140df27 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -25,7 +25,6 @@
25#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
26struct irqaction; 26struct irqaction;
27struct pt_regs; 27struct pt_regs;
28extern void migrate_irqs(void);
29 28
30extern void asm_do_IRQ(unsigned int, struct pt_regs *); 29extern void asm_do_IRQ(unsigned int, struct pt_regs *);
31void handle_IRQ(unsigned int, struct pt_regs *); 30void handle_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index ca56537b61bc..50e89869178a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
48#define KVM_REQ_SLEEP \ 48#define KVM_REQ_SLEEP \
49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) 50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
51#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
51 52
52DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); 53DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
53 54
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
147 148
148typedef struct kvm_cpu_context kvm_cpu_context_t; 149typedef struct kvm_cpu_context kvm_cpu_context_t;
149 150
151struct vcpu_reset_state {
152 unsigned long pc;
153 unsigned long r0;
154 bool be;
155 bool reset;
156};
157
150struct kvm_vcpu_arch { 158struct kvm_vcpu_arch {
151 struct kvm_cpu_context ctxt; 159 struct kvm_cpu_context ctxt;
152 160
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
186 /* Cache some mmu pages needed inside spinlock regions */ 194 /* Cache some mmu pages needed inside spinlock regions */
187 struct kvm_mmu_memory_cache mmu_page_cache; 195 struct kvm_mmu_memory_cache mmu_page_cache;
188 196
197 struct vcpu_reset_state reset_state;
198
189 /* Detect first run of a vcpu */ 199 /* Detect first run of a vcpu */
190 bool has_run_once; 200 bool has_run_once;
191}; 201};
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index c4b1d4fb1797..de2089501b8b 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
76#define S2_PMD_MASK PMD_MASK 76#define S2_PMD_MASK PMD_MASK
77#define S2_PMD_SIZE PMD_SIZE 77#define S2_PMD_SIZE PMD_SIZE
78 78
79static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
80{
81 return true;
82}
83
79#endif /* __ARM_S2_PGTABLE_H_ */ 84#endif /* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9908dacf9229..844861368cd5 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
31#include <linux/smp.h> 31#include <linux/smp.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/ratelimit.h>
35#include <linux/errno.h> 34#include <linux/errno.h>
36#include <linux/list.h> 35#include <linux/list.h>
37#include <linux/kallsyms.h> 36#include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
109 return nr_irqs; 108 return nr_irqs;
110} 109}
111#endif 110#endif
112
113#ifdef CONFIG_HOTPLUG_CPU
114static bool migrate_one_irq(struct irq_desc *desc)
115{
116 struct irq_data *d = irq_desc_get_irq_data(desc);
117 const struct cpumask *affinity = irq_data_get_affinity_mask(d);
118 struct irq_chip *c;
119 bool ret = false;
120
121 /*
122 * If this is a per-CPU interrupt, or the affinity does not
123 * include this CPU, then we have nothing to do.
124 */
125 if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
126 return false;
127
128 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
129 affinity = cpu_online_mask;
130 ret = true;
131 }
132
133 c = irq_data_get_irq_chip(d);
134 if (!c->irq_set_affinity)
135 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
136 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
137 cpumask_copy(irq_data_get_affinity_mask(d), affinity);
138
139 return ret;
140}
141
142/*
143 * The current CPU has been marked offline. Migrate IRQs off this CPU.
144 * If the affinity settings do not allow other CPUs, force them onto any
145 * available CPU.
146 *
147 * Note: we must iterate over all IRQs, whether they have an attached
148 * action structure or not, as we need to get chained interrupts too.
149 */
150void migrate_irqs(void)
151{
152 unsigned int i;
153 struct irq_desc *desc;
154 unsigned long flags;
155
156 local_irq_save(flags);
157
158 for_each_irq_desc(i, desc) {
159 bool affinity_broken;
160
161 raw_spin_lock(&desc->lock);
162 affinity_broken = migrate_one_irq(desc);
163 raw_spin_unlock(&desc->lock);
164
165 if (affinity_broken)
166 pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
167 i, smp_processor_id());
168 }
169
170 local_irq_restore(flags);
171}
172#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3bf82232b1be..1d6f5ea522f4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -254,7 +254,7 @@ int __cpu_disable(void)
254 /* 254 /*
255 * OK - migrate IRQs away from this CPU 255 * OK - migrate IRQs away from this CPU
256 */ 256 */
257 migrate_irqs(); 257 irq_migrate_all_off_this_cpu();
258 258
259 /* 259 /*
260 * Flush user cache and TLB mappings, and then remove this CPU 260 * Flush user cache and TLB mappings, and then remove this CPU
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 222c1635bc7a..e8bd288fd5be 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
1450 reset_coproc_regs(vcpu, table, num); 1450 reset_coproc_regs(vcpu, table, num);
1451 1451
1452 for (num = 1; num < NR_CP15_REGS; num++) 1452 for (num = 1; num < NR_CP15_REGS; num++)
1453 if (vcpu_cp15(vcpu, num) == 0x42424242) 1453 WARN(vcpu_cp15(vcpu, num) == 0x42424242,
1454 panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); 1454 "Didn't reset vcpu_cp15(vcpu, %zi)", num);
1455} 1455}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 5ed0c3ee33d6..e53327912adc 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -26,6 +26,7 @@
26#include <asm/cputype.h> 26#include <asm/cputype.h>
27#include <asm/kvm_arm.h> 27#include <asm/kvm_arm.h>
28#include <asm/kvm_coproc.h> 28#include <asm/kvm_coproc.h>
29#include <asm/kvm_emulate.h>
29 30
30#include <kvm/arm_arch_timer.h> 31#include <kvm/arm_arch_timer.h>
31 32
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
69 /* Reset CP15 registers */ 70 /* Reset CP15 registers */
70 kvm_reset_coprocs(vcpu); 71 kvm_reset_coprocs(vcpu);
71 72
73 /*
74 * Additional reset state handling that PSCI may have imposed on us.
75 * Must be done after all the sys_reg reset.
76 */
77 if (READ_ONCE(vcpu->arch.reset_state.reset)) {
78 unsigned long target_pc = vcpu->arch.reset_state.pc;
79
80 /* Gracefully handle Thumb2 entry point */
81 if (target_pc & 1) {
82 target_pc &= ~1UL;
83 vcpu_set_thumb(vcpu);
84 }
85
86 /* Propagate caller endianness */
87 if (vcpu->arch.reset_state.be)
88 kvm_vcpu_set_be(vcpu);
89
90 *vcpu_pc(vcpu) = target_pc;
91 vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
92
93 vcpu->arch.reset_state.reset = false;
94 }
95
72 /* Reset arch_timer context */ 96 /* Reset arch_timer context */
73 return kvm_timer_vcpu_reset(vcpu); 97 return kvm_timer_vcpu_reset(vcpu);
74} 98}
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index a8b291f00109..dae514c8276a 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
152 mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && 152 mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
153 (cx->mpu_logic_state == PWRDM_POWER_OFF); 153 (cx->mpu_logic_state == PWRDM_POWER_OFF);
154 154
155 /* Enter broadcast mode for periodic timers */
156 tick_broadcast_enable();
157
158 /* Enter broadcast mode for one-shot timers */
155 tick_broadcast_enter(); 159 tick_broadcast_enter();
156 160
157 /* 161 /*
@@ -218,15 +222,6 @@ fail:
218 return index; 222 return index;
219} 223}
220 224
221/*
222 * For each cpu, setup the broadcast timer because local timers
223 * stops for the states above C1.
224 */
225static void omap_setup_broadcast_timer(void *arg)
226{
227 tick_broadcast_enable();
228}
229
230static struct cpuidle_driver omap4_idle_driver = { 225static struct cpuidle_driver omap4_idle_driver = {
231 .name = "omap4_idle", 226 .name = "omap4_idle",
232 .owner = THIS_MODULE, 227 .owner = THIS_MODULE,
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void)
319 if (!cpu_clkdm[0] || !cpu_clkdm[1]) 314 if (!cpu_clkdm[0] || !cpu_clkdm[1])
320 return -ENODEV; 315 return -ENODEV;
321 316
322 /* Configure the broadcast timer on each cpu */
323 on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
324
325 return cpuidle_register(idle_driver, cpu_online_mask); 317 return cpuidle_register(idle_driver, cpu_online_mask);
326} 318}
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index f86b72d1d59e..1444b4b4bd9f 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
83 u32 enable_mask, enable_shift; 83 u32 enable_mask, enable_shift;
84 u32 pipd_mask, pipd_shift; 84 u32 pipd_mask, pipd_shift;
85 u32 reg; 85 u32 reg;
86 int ret;
86 87
87 if (dsi_id == 0) { 88 if (dsi_id == 0) {
88 enable_mask = OMAP4_DSI1_LANEENABLE_MASK; 89 enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
98 return -ENODEV; 99 return -ENODEV;
99 } 100 }
100 101
101 regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg); 102 ret = regmap_read(omap4_dsi_mux_syscon,
103 OMAP4_DSIPHY_SYSCON_OFFSET,
104 &reg);
105 if (ret)
106 return ret;
102 107
103 reg &= ~enable_mask; 108 reg &= ~enable_mask;
104 reg &= ~pipd_mask; 109 reg &= ~pipd_mask;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index fc5fb776a710..17558be4bf0a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -50,6 +50,9 @@
50#define OMAP4_NR_BANKS 4 50#define OMAP4_NR_BANKS 4
51#define OMAP4_NR_IRQS 128 51#define OMAP4_NR_IRQS 128
52 52
53#define SYS_NIRQ1_EXT_SYS_IRQ_1 7
54#define SYS_NIRQ2_EXT_SYS_IRQ_2 119
55
53static void __iomem *wakeupgen_base; 56static void __iomem *wakeupgen_base;
54static void __iomem *sar_base; 57static void __iomem *sar_base;
55static DEFINE_RAW_SPINLOCK(wakeupgen_lock); 58static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
153 irq_chip_unmask_parent(d); 156 irq_chip_unmask_parent(d);
154} 157}
155 158
159/*
160 * The sys_nirq pins bypass peripheral modules and are wired directly
161 * to MPUSS wakeupgen. They get automatically inverted for GIC.
162 */
163static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
164{
165 bool inverted = false;
166
167 switch (type) {
168 case IRQ_TYPE_LEVEL_LOW:
169 type &= ~IRQ_TYPE_LEVEL_MASK;
170 type |= IRQ_TYPE_LEVEL_HIGH;
171 inverted = true;
172 break;
173 case IRQ_TYPE_EDGE_FALLING:
174 type &= ~IRQ_TYPE_EDGE_BOTH;
175 type |= IRQ_TYPE_EDGE_RISING;
176 inverted = true;
177 break;
178 default:
179 break;
180 }
181
182 if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
183 d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
184 pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
185 d->hwirq);
186
187 return irq_chip_set_type_parent(d, type);
188}
189
156#ifdef CONFIG_HOTPLUG_CPU 190#ifdef CONFIG_HOTPLUG_CPU
157static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); 191static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
158 192
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
446 .irq_mask = wakeupgen_mask, 480 .irq_mask = wakeupgen_mask,
447 .irq_unmask = wakeupgen_unmask, 481 .irq_unmask = wakeupgen_unmask,
448 .irq_retrigger = irq_chip_retrigger_hierarchy, 482 .irq_retrigger = irq_chip_retrigger_hierarchy,
449 .irq_set_type = irq_chip_set_type_parent, 483 .irq_set_type = wakeupgen_irq_set_type,
450 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, 484 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
451#ifdef CONFIG_SMP 485#ifdef CONFIG_SMP
452 .irq_set_affinity = irq_chip_set_affinity_parent, 486 .irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index b5531dd3ae9c..3a04c73ac03c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1002,8 +1002,10 @@ static int _enable_clocks(struct omap_hwmod *oh)
1002 clk_enable(oh->_clk); 1002 clk_enable(oh->_clk);
1003 1003
1004 list_for_each_entry(os, &oh->slave_ports, node) { 1004 list_for_each_entry(os, &oh->slave_ports, node) {
1005 if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) 1005 if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
1006 omap2_clk_deny_idle(os->_clk);
1006 clk_enable(os->_clk); 1007 clk_enable(os->_clk);
1008 }
1007 } 1009 }
1008 1010
1009 /* The opt clocks are controlled by the device driver. */ 1011 /* The opt clocks are controlled by the device driver. */
@@ -1055,8 +1057,10 @@ static int _disable_clocks(struct omap_hwmod *oh)
1055 clk_disable(oh->_clk); 1057 clk_disable(oh->_clk);
1056 1058
1057 list_for_each_entry(os, &oh->slave_ports, node) { 1059 list_for_each_entry(os, &oh->slave_ports, node) {
1058 if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) 1060 if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
1059 clk_disable(os->_clk); 1061 clk_disable(os->_clk);
1062 omap2_clk_allow_idle(os->_clk);
1063 }
1060 } 1064 }
1061 1065
1062 if (oh->flags & HWMOD_OPT_CLKS_NEEDED) 1066 if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
@@ -2436,9 +2440,13 @@ static void _setup_iclk_autoidle(struct omap_hwmod *oh)
2436 continue; 2440 continue;
2437 2441
2438 if (os->flags & OCPIF_SWSUP_IDLE) { 2442 if (os->flags & OCPIF_SWSUP_IDLE) {
2439 /* XXX omap_iclk_deny_idle(c); */ 2443 /*
2444 * we might have multiple users of one iclk with
2445 * different requirements, disable autoidle when
2446 * the module is enabled, e.g. dss iclk
2447 */
2440 } else { 2448 } else {
2441 /* XXX omap_iclk_allow_idle(c); */ 2449 /* we are enabling autoidle afterwards anyways */
2442 clk_enable(os->_clk); 2450 clk_enable(os->_clk);
2443 } 2451 }
2444 } 2452 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1e2922e447c..1e3e08a1c456 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev)
2390 return; 2390 return;
2391 2391
2392 arm_teardown_iommu_dma_ops(dev); 2392 arm_teardown_iommu_dma_ops(dev);
2393 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2394 set_dma_ops(dev, NULL);
2393} 2395}
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 2c118a6ab358..0dc23fc227ed 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
247 } 247 }
248 248
249 /* Copy arch-dep-instance from template. */ 249 /* Copy arch-dep-instance from template. */
250 memcpy(code, (unsigned char *)optprobe_template_entry, 250 memcpy(code, (unsigned long *)&optprobe_template_entry,
251 TMPL_END_IDX * sizeof(kprobe_opcode_t)); 251 TMPL_END_IDX * sizeof(kprobe_opcode_t));
252 252
253 /* Adjust buffer according to instruction. */ 253 /* Adjust buffer according to instruction. */
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 64acccc4bfcb..f74b13aa5aa5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -227,34 +227,34 @@
227 227
228 pinctrl_usdhc1_100mhz: usdhc1-100grp { 228 pinctrl_usdhc1_100mhz: usdhc1-100grp {
229 fsl,pins = < 229 fsl,pins = <
230 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 230 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d
231 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 231 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd
232 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 232 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd
233 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 233 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd
234 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 234 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd
235 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 235 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd
236 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 236 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd
237 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 237 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd
238 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 238 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd
239 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 239 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd
240 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 240 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d
241 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 241 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
242 >; 242 >;
243 }; 243 };
244 244
245 pinctrl_usdhc1_200mhz: usdhc1-200grp { 245 pinctrl_usdhc1_200mhz: usdhc1-200grp {
246 fsl,pins = < 246 fsl,pins = <
247 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 247 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f
248 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 248 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf
249 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 249 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf
250 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 250 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf
251 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 251 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf
252 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 252 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf
253 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 253 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf
254 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 254 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf
255 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 255 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf
256 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 256 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf
257 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 257 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f
258 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 258 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
259 >; 259 >;
260 }; 260 };
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 8e9d6d5ed7b2..b6d31499fb43 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -360,6 +360,8 @@
360 <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, 360 <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
361 <&clk IMX8MQ_CLK_USDHC1_ROOT>; 361 <&clk IMX8MQ_CLK_USDHC1_ROOT>;
362 clock-names = "ipg", "ahb", "per"; 362 clock-names = "ipg", "ahb", "per";
363 assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>;
364 assigned-clock-rates = <400000000>;
363 fsl,tuning-start-tap = <20>; 365 fsl,tuning-start-tap = <20>;
364 fsl,tuning-step = <2>; 366 fsl,tuning-step = <2>;
365 bus-width = <4>; 367 bus-width = <4>;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index 5b4a9609e31f..2468762283a5 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -351,7 +351,7 @@
351 reg = <0>; 351 reg = <0>;
352 pinctrl-names = "default"; 352 pinctrl-names = "default";
353 pinctrl-0 = <&cp0_copper_eth_phy_reset>; 353 pinctrl-0 = <&cp0_copper_eth_phy_reset>;
354 reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>; 354 reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>;
355 reset-assert-us = <10000>; 355 reset-assert-us = <10000>;
356 }; 356 };
357 357
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index bd937d68ca3b..040b36ef0dd2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -40,6 +40,7 @@
40 pinctrl-0 = <&usb30_host_drv>; 40 pinctrl-0 = <&usb30_host_drv>;
41 regulator-name = "vcc_host_5v"; 41 regulator-name = "vcc_host_5v";
42 regulator-always-on; 42 regulator-always-on;
43 regulator-boot-on;
43 vin-supply = <&vcc_sys>; 44 vin-supply = <&vcc_sys>;
44 }; 45 };
45 46
@@ -51,6 +52,7 @@
51 pinctrl-0 = <&usb20_host_drv>; 52 pinctrl-0 = <&usb20_host_drv>;
52 regulator-name = "vcc_host1_5v"; 53 regulator-name = "vcc_host1_5v";
53 regulator-always-on; 54 regulator-always-on;
55 regulator-boot-on;
54 vin-supply = <&vcc_sys>; 56 vin-supply = <&vcc_sys>;
55 }; 57 };
56 58
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index 1ee0dc0d9f10..d1cf404b8708 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -22,7 +22,7 @@
22 backlight = <&backlight>; 22 backlight = <&backlight>;
23 power-supply = <&pp3300_disp>; 23 power-supply = <&pp3300_disp>;
24 24
25 ports { 25 port {
26 panel_in_edp: endpoint { 26 panel_in_edp: endpoint {
27 remote-endpoint = <&edp_out_panel>; 27 remote-endpoint = <&edp_out_panel>;
28 }; 28 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
index 81e73103fa78..15e254a77391 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -43,7 +43,7 @@
43 backlight = <&backlight>; 43 backlight = <&backlight>;
44 power-supply = <&pp3300_disp>; 44 power-supply = <&pp3300_disp>;
45 45
46 ports { 46 port {
47 panel_in_edp: endpoint { 47 panel_in_edp: endpoint {
48 remote-endpoint = <&edp_out_panel>; 48 remote-endpoint = <&edp_out_panel>;
49 }; 49 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
index 0b8f1edbd746..b48a63c3efc3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -91,7 +91,7 @@
91 pinctrl-0 = <&lcd_panel_reset>; 91 pinctrl-0 = <&lcd_panel_reset>;
92 power-supply = <&vcc3v3_s0>; 92 power-supply = <&vcc3v3_s0>;
93 93
94 ports { 94 port {
95 panel_in_edp: endpoint { 95 panel_in_edp: endpoint {
96 remote-endpoint = <&edp_out_panel>; 96 remote-endpoint = <&edp_out_panel>;
97 }; 97 };
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7732d0ba4e60..da3fc7324d68 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
48#define KVM_REQ_SLEEP \ 48#define KVM_REQ_SLEEP \
49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) 50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
51#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
51 52
52DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); 53DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
53 54
@@ -208,6 +209,13 @@ struct kvm_cpu_context {
208 209
209typedef struct kvm_cpu_context kvm_cpu_context_t; 210typedef struct kvm_cpu_context kvm_cpu_context_t;
210 211
212struct vcpu_reset_state {
213 unsigned long pc;
214 unsigned long r0;
215 bool be;
216 bool reset;
217};
218
211struct kvm_vcpu_arch { 219struct kvm_vcpu_arch {
212 struct kvm_cpu_context ctxt; 220 struct kvm_cpu_context ctxt;
213 221
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch {
297 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ 305 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
298 u64 vsesr_el2; 306 u64 vsesr_el2;
299 307
308 /* Additional reset state */
309 struct vcpu_reset_state reset_state;
310
300 /* True when deferrable sysregs are loaded on the physical CPU, 311 /* True when deferrable sysregs are loaded on the physical CPU,
301 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ 312 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
302 bool sysregs_loaded_on_cpu; 313 bool sysregs_loaded_on_cpu;
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index e1ec947e7c0c..0c656850eeea 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -332,6 +332,17 @@ static inline void *phys_to_virt(phys_addr_t x)
332#define virt_addr_valid(kaddr) \ 332#define virt_addr_valid(kaddr) \
333 (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) 333 (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
334 334
335/*
336 * Given that the GIC architecture permits ITS implementations that can only be
337 * configured with an LPI table address once, GICv3 systems with many CPUs may
338 * end up reserving a lot of different regions after a kexec for their LPI
339 * tables (one per CPU), as we are forced to reuse the same memory after kexec
340 * (and thus reserve it persistently with EFI beforehand)
341 */
342#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
343# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
344#endif
345
335#include <asm-generic/memory_model.h> 346#include <asm-generic/memory_model.h>
336 347
337#endif 348#endif
diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h
index 2ba6c6b9541f..71abfc7612b2 100644
--- a/arch/arm64/include/asm/neon-intrinsics.h
+++ b/arch/arm64/include/asm/neon-intrinsics.h
@@ -36,4 +36,8 @@
36#include <arm_neon.h> 36#include <arm_neon.h>
37#endif 37#endif
38 38
39#ifdef CONFIG_CC_IS_CLANG
40#pragma clang diagnostic ignored "-Wincompatible-pointer-types"
41#endif
42
39#endif /* __ASM_NEON_INTRINSICS_H */ 43#endif /* __ASM_NEON_INTRINSICS_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 15d79a8e5e5e..eecf7927dab0 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -539,8 +539,7 @@ set_hcr:
539 /* GICv3 system register access */ 539 /* GICv3 system register access */
540 mrs x0, id_aa64pfr0_el1 540 mrs x0, id_aa64pfr0_el1
541 ubfx x0, x0, #24, #4 541 ubfx x0, x0, #24, #4
542 cmp x0, #1 542 cbz x0, 3f
543 b.ne 3f
544 543
545 mrs_s x0, SYS_ICC_SRE_EL2 544 mrs_s x0, SYS_ICC_SRE_EL2
546 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 545 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 9dce33b0e260..ddaea0fd2fa4 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs)
1702} 1702}
1703 1703
1704/* 1704/*
1705 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a 1705 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
1706 * We also take into account DIT (bit 24), which is not yet documented, and 1706 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
1707 * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be 1707 * not described in ARM DDI 0487D.a.
1708 * allocated an EL0 meaning in future. 1708 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
1709 * be allocated an EL0 meaning in future.
1709 * Userspace cannot use these until they have an architectural meaning. 1710 * Userspace cannot use these until they have an architectural meaning.
1710 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. 1711 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
1711 * We also reserve IL for the kernel; SS is handled dynamically. 1712 * We also reserve IL for the kernel; SS is handled dynamically.
1712 */ 1713 */
1713#define SPSR_EL1_AARCH64_RES0_BITS \ 1714#define SPSR_EL1_AARCH64_RES0_BITS \
1714 (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ 1715 (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
1715 GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) 1716 GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
1716#define SPSR_EL1_AARCH32_RES0_BITS \ 1717#define SPSR_EL1_AARCH32_RES0_BITS \
1717 (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) 1718 (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
1718 1719
1719static int valid_compat_regs(struct user_pt_regs *regs) 1720static int valid_compat_regs(struct user_pt_regs *regs)
1720{ 1721{
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 4b0e1231625c..009849328289 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p)
313 arm64_memblock_init(); 313 arm64_memblock_init();
314 314
315 paging_init(); 315 paging_init();
316 efi_apply_persistent_mem_reservations();
317 316
318 acpi_table_upgrade(); 317 acpi_table_upgrade();
319 318
@@ -340,6 +339,9 @@ void __init setup_arch(char **cmdline_p)
340 smp_init_cpus(); 339 smp_init_cpus();
341 smp_build_mpidr_hash(); 340 smp_build_mpidr_hash();
342 341
342 /* Init percpu seeds for random tags after cpus are set up. */
343 kasan_init_tags();
344
343#ifdef CONFIG_ARM64_SW_TTBR0_PAN 345#ifdef CONFIG_ARM64_SW_TTBR0_PAN
344 /* 346 /*
345 * Make sure init_thread_info.ttbr0 always generates translation 347 * Make sure init_thread_info.ttbr0 always generates translation
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index b0b1478094b4..421ebf6f7086 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -23,6 +23,7 @@
23#include <kvm/arm_psci.h> 23#include <kvm/arm_psci.h>
24 24
25#include <asm/cpufeature.h> 25#include <asm/cpufeature.h>
26#include <asm/kprobes.h>
26#include <asm/kvm_asm.h> 27#include <asm/kvm_asm.h>
27#include <asm/kvm_emulate.h> 28#include <asm/kvm_emulate.h>
28#include <asm/kvm_host.h> 29#include <asm/kvm_host.h>
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
107 108
108 write_sysreg(kvm_get_hyp_vector(), vbar_el1); 109 write_sysreg(kvm_get_hyp_vector(), vbar_el1);
109} 110}
111NOKPROBE_SYMBOL(activate_traps_vhe);
110 112
111static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) 113static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
112{ 114{
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void)
154 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); 156 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
155 write_sysreg(vectors, vbar_el1); 157 write_sysreg(vectors, vbar_el1);
156} 158}
159NOKPROBE_SYMBOL(deactivate_traps_vhe);
157 160
158static void __hyp_text __deactivate_traps_nvhe(void) 161static void __hyp_text __deactivate_traps_nvhe(void)
159{ 162{
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
513 516
514 return exit_code; 517 return exit_code;
515} 518}
519NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
516 520
517/* Switch to the guest for legacy non-VHE systems */ 521/* Switch to the guest for legacy non-VHE systems */
518int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) 522int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
620 read_sysreg_el2(esr), read_sysreg_el2(far), 624 read_sysreg_el2(esr), read_sysreg_el2(far),
621 read_sysreg(hpfar_el2), par, vcpu); 625 read_sysreg(hpfar_el2), par, vcpu);
622} 626}
627NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
623 628
624void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) 629void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
625{ 630{
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 68d6f7c3b237..b426e2cf973c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -18,6 +18,7 @@
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19#include <linux/kvm_host.h> 19#include <linux/kvm_host.h>
20 20
21#include <asm/kprobes.h>
21#include <asm/kvm_asm.h> 22#include <asm/kvm_asm.h>
22#include <asm/kvm_emulate.h> 23#include <asm/kvm_emulate.h>
23#include <asm/kvm_hyp.h> 24#include <asm/kvm_hyp.h>
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
98{ 99{
99 __sysreg_save_common_state(ctxt); 100 __sysreg_save_common_state(ctxt);
100} 101}
102NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
101 103
102void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) 104void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
103{ 105{
104 __sysreg_save_common_state(ctxt); 106 __sysreg_save_common_state(ctxt);
105 __sysreg_save_el2_return_state(ctxt); 107 __sysreg_save_el2_return_state(ctxt);
106} 108}
109NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
107 110
108static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) 111static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
109{ 112{
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
188{ 191{
189 __sysreg_restore_common_state(ctxt); 192 __sysreg_restore_common_state(ctxt);
190} 193}
194NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
191 195
192void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) 196void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
193{ 197{
194 __sysreg_restore_common_state(ctxt); 198 __sysreg_restore_common_state(ctxt);
195 __sysreg_restore_el2_return_state(ctxt); 199 __sysreg_restore_el2_return_state(ctxt);
196} 200}
201NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
197 202
198void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) 203void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
199{ 204{
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index b72a3dd56204..f16a5f8ff2b4 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -32,6 +32,7 @@
32#include <asm/kvm_arm.h> 32#include <asm/kvm_arm.h>
33#include <asm/kvm_asm.h> 33#include <asm/kvm_asm.h>
34#include <asm/kvm_coproc.h> 34#include <asm/kvm_coproc.h>
35#include <asm/kvm_emulate.h>
35#include <asm/kvm_mmu.h> 36#include <asm/kvm_mmu.h>
36 37
37/* Maximum phys_shift supported for any VM on this host */ 38/* Maximum phys_shift supported for any VM on this host */
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
105 * This function finds the right table above and sets the registers on 106 * This function finds the right table above and sets the registers on
106 * the virtual CPU struct to their architecturally defined reset 107 * the virtual CPU struct to their architecturally defined reset
107 * values. 108 * values.
109 *
110 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
111 * ioctl or as part of handling a request issued by another VCPU in the PSCI
112 * handling code. In the first case, the VCPU will not be loaded, and in the
113 * second case the VCPU will be loaded. Because this function operates purely
114 * on the memory-backed values of system registers, we want to do a full put if
115 * we were loaded (handling a request) and load the values back at the end of
116 * the function. Otherwise we leave the state alone. In both cases, we
117 * disable preemption around the vcpu reset as we would otherwise race with
118 * preempt notifiers which also call put/load.
108 */ 119 */
109int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 120int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
110{ 121{
111 const struct kvm_regs *cpu_reset; 122 const struct kvm_regs *cpu_reset;
123 int ret = -EINVAL;
124 bool loaded;
125
126 preempt_disable();
127 loaded = (vcpu->cpu != -1);
128 if (loaded)
129 kvm_arch_vcpu_put(vcpu);
112 130
113 switch (vcpu->arch.target) { 131 switch (vcpu->arch.target) {
114 default: 132 default:
115 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { 133 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
116 if (!cpu_has_32bit_el1()) 134 if (!cpu_has_32bit_el1())
117 return -EINVAL; 135 goto out;
118 cpu_reset = &default_regs_reset32; 136 cpu_reset = &default_regs_reset32;
119 } else { 137 } else {
120 cpu_reset = &default_regs_reset; 138 cpu_reset = &default_regs_reset;
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
129 /* Reset system registers */ 147 /* Reset system registers */
130 kvm_reset_sys_regs(vcpu); 148 kvm_reset_sys_regs(vcpu);
131 149
150 /*
151 * Additional reset state handling that PSCI may have imposed on us.
152 * Must be done after all the sys_reg reset.
153 */
154 if (vcpu->arch.reset_state.reset) {
155 unsigned long target_pc = vcpu->arch.reset_state.pc;
156
157 /* Gracefully handle Thumb2 entry point */
158 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
159 target_pc &= ~1UL;
160 vcpu_set_thumb(vcpu);
161 }
162
163 /* Propagate caller endianness */
164 if (vcpu->arch.reset_state.be)
165 kvm_vcpu_set_be(vcpu);
166
167 *vcpu_pc(vcpu) = target_pc;
168 vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
169
170 vcpu->arch.reset_state.reset = false;
171 }
172
132 /* Reset PMU */ 173 /* Reset PMU */
133 kvm_pmu_vcpu_reset(vcpu); 174 kvm_pmu_vcpu_reset(vcpu);
134 175
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
137 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; 178 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
138 179
139 /* Reset timer */ 180 /* Reset timer */
140 return kvm_timer_vcpu_reset(vcpu); 181 ret = kvm_timer_vcpu_reset(vcpu);
182out:
183 if (loaded)
184 kvm_arch_vcpu_load(vcpu, smp_processor_id());
185 preempt_enable();
186 return ret;
141} 187}
142 188
143void kvm_set_ipa_limit(void) 189void kvm_set_ipa_limit(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e3e37228ae4e..c936aa40c3f4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
314 return read_zero(vcpu, p); 314 return read_zero(vcpu, p);
315} 315}
316 316
317static bool trap_undef(struct kvm_vcpu *vcpu, 317/*
318 struct sys_reg_params *p, 318 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
319 const struct sys_reg_desc *r) 319 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
320 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
321 * treat it separately.
322 */
323static bool trap_loregion(struct kvm_vcpu *vcpu,
324 struct sys_reg_params *p,
325 const struct sys_reg_desc *r)
320{ 326{
321 kvm_inject_undefined(vcpu); 327 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
322 return false; 328 u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
329 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
330
331 if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
332 kvm_inject_undefined(vcpu);
333 return false;
334 }
335
336 if (p->is_write && sr == SYS_LORID_EL1)
337 return write_to_read_only(vcpu, p, r);
338
339 return trap_raz_wi(vcpu, p, r);
323} 340}
324 341
325static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, 342static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
1048 if (val & ptrauth_mask) 1065 if (val & ptrauth_mask)
1049 kvm_debug("ptrauth unsupported for guests, suppressing\n"); 1066 kvm_debug("ptrauth unsupported for guests, suppressing\n");
1050 val &= ~ptrauth_mask; 1067 val &= ~ptrauth_mask;
1051 } else if (id == SYS_ID_AA64MMFR1_EL1) {
1052 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
1053 kvm_debug("LORegions unsupported for guests, suppressing\n");
1054
1055 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
1056 } 1068 }
1057 1069
1058 return val; 1070 return val;
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
1338 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, 1350 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1339 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, 1351 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1340 1352
1341 { SYS_DESC(SYS_LORSA_EL1), trap_undef }, 1353 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1342 { SYS_DESC(SYS_LOREA_EL1), trap_undef }, 1354 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1343 { SYS_DESC(SYS_LORN_EL1), trap_undef }, 1355 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
1344 { SYS_DESC(SYS_LORC_EL1), trap_undef }, 1356 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
1345 { SYS_DESC(SYS_LORID_EL1), trap_undef }, 1357 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
1346 1358
1347 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, 1359 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1348 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, 1360 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2596 table = get_target_table(vcpu->arch.target, true, &num); 2608 table = get_target_table(vcpu->arch.target, true, &num);
2597 reset_sys_reg_descs(vcpu, table, num); 2609 reset_sys_reg_descs(vcpu, table, num);
2598 2610
2599 for (num = 1; num < NR_SYS_REGS; num++) 2611 for (num = 1; num < NR_SYS_REGS; num++) {
2600 if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) 2612 if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
2601 panic("Didn't reset __vcpu_sys_reg(%zi)", num); 2613 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
2614 break;
2615 }
2602} 2616}
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 4b55b15707a3..f37a86d2a69d 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -252,8 +252,6 @@ void __init kasan_init(void)
252 memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); 252 memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
253 cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); 253 cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
254 254
255 kasan_init_tags();
256
257 /* At this point kasan is fully initialized. Enable error messages */ 255 /* At this point kasan is fully initialized. Enable error messages */
258 init_task.kasan_depth = 0; 256 init_task.kasan_depth = 0;
259 pr_info("KernelAddressSanitizer initialized\n"); 257 pr_info("KernelAddressSanitizer initialized\n");
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index edfcbb25fd9f..dcea277c09ae 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -45,8 +45,8 @@
45 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) 45 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
46#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) 46#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
47#define pte_clear(mm, addr, ptep) set_pte((ptep), \ 47#define pte_clear(mm, addr, ptep) set_pte((ptep), \
48 (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) 48 (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
49#define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) 49#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
50#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 50#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
51#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) 51#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
52#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ 52#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
241 241
242#define pgd_index(address) ((address) >> PGDIR_SHIFT) 242#define pgd_index(address) ((address) >> PGDIR_SHIFT)
243 243
244#define __HAVE_PHYS_MEM_ACCESS_PROT
245struct file;
246extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
247 unsigned long size, pgprot_t vma_prot);
248
244/* 249/*
245 * Macro to make mark a page protection value as "uncacheable". Note 250 * Macro to make mark a page protection value as "uncacheable". Note
246 * that "protection" is really a misnomer here as the protection value 251 * that "protection" is really a misnomer here as the protection value
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 8f454810514f..21e0bd5293dd 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -49,7 +49,7 @@ struct thread_struct {
49}; 49};
50 50
51#define INIT_THREAD { \ 51#define INIT_THREAD { \
52 .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \ 52 .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
53 .sr = DEFAULT_PSR_VALUE, \ 53 .sr = DEFAULT_PSR_VALUE, \
54} 54}
55 55
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p);
95#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) 95#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp)
96 96
97#define task_pt_regs(p) \ 97#define task_pt_regs(p) \
98 ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) 98 ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
99 99
100#define cpu_relax() barrier() 100#define cpu_relax() barrier()
101 101
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c
index 659253e9989c..d67f9777cfd9 100644
--- a/arch/csky/kernel/dumpstack.c
+++ b/arch/csky/kernel/dumpstack.c
@@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
38 if (task) 38 if (task)
39 stack = (unsigned long *)thread_saved_fp(task); 39 stack = (unsigned long *)thread_saved_fp(task);
40 else 40 else
41#ifdef CONFIG_STACKTRACE
42 asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
43#else
41 stack = (unsigned long *)&stack; 44 stack = (unsigned long *)&stack;
45#endif
42 } 46 }
43 47
44 show_trace(stack); 48 show_trace(stack);
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 57f1afe19a52..f2f12fff36f7 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -8,6 +8,7 @@
8#include <linux/ptrace.h> 8#include <linux/ptrace.h>
9#include <linux/regset.h> 9#include <linux/regset.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/sched/task_stack.h>
11#include <linux/signal.h> 12#include <linux/signal.h>
12#include <linux/smp.h> 13#include <linux/smp.h>
13#include <linux/uaccess.h> 14#include <linux/uaccess.h>
@@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target,
159static const struct user_regset csky_regsets[] = { 160static const struct user_regset csky_regsets[] = {
160 [REGSET_GPR] = { 161 [REGSET_GPR] = {
161 .core_note_type = NT_PRSTATUS, 162 .core_note_type = NT_PRSTATUS,
162 .n = ELF_NGREG, 163 .n = sizeof(struct pt_regs) / sizeof(u32),
163 .size = sizeof(u32), 164 .size = sizeof(u32),
164 .align = sizeof(u32), 165 .align = sizeof(u32),
165 .get = &gpr_get, 166 .get = &gpr_get,
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index ddc4dd79f282..b07a534b3062 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
160{ 160{
161 unsigned long mask = 1 << cpu; 161 unsigned long mask = 1 << cpu;
162 162
163 secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; 163 secondary_stack =
164 (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
164 secondary_hint = mfcr("cr31"); 165 secondary_hint = mfcr("cr31");
165 secondary_ccr = mfcr("cr18"); 166 secondary_ccr = mfcr("cr18");
166 167
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
index cb7c03e5cd21..8473b6bdf512 100644
--- a/arch/csky/mm/ioremap.c
+++ b/arch/csky/mm/ioremap.c
@@ -46,3 +46,17 @@ void iounmap(void __iomem *addr)
46 vunmap((void *)((unsigned long)addr & PAGE_MASK)); 46 vunmap((void *)((unsigned long)addr & PAGE_MASK));
47} 47}
48EXPORT_SYMBOL(iounmap); 48EXPORT_SYMBOL(iounmap);
49
50pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
51 unsigned long size, pgprot_t vma_prot)
52{
53 if (!pfn_valid(pfn)) {
54 vma_prot.pgprot |= _PAGE_SO;
55 return pgprot_noncached(vma_prot);
56 } else if (file->f_flags & O_SYNC) {
57 return pgprot_noncached(vma_prot);
58 }
59
60 return vma_prot;
61}
62EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index b16710a8a9e7..76e9bf88d3b9 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -79,8 +79,6 @@ enum reg_val_type {
79 REG_64BIT_32BIT, 79 REG_64BIT_32BIT,
80 /* 32-bit compatible, need truncation for 64-bit ops. */ 80 /* 32-bit compatible, need truncation for 64-bit ops. */
81 REG_32BIT, 81 REG_32BIT,
82 /* 32-bit zero extended. */
83 REG_32BIT_ZERO_EX,
84 /* 32-bit no sign/zero extension needed. */ 82 /* 32-bit no sign/zero extension needed. */
85 REG_32BIT_POS 83 REG_32BIT_POS
86}; 84};
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
343 const struct bpf_prog *prog = ctx->skf; 341 const struct bpf_prog *prog = ctx->skf;
344 int stack_adjust = ctx->stack_size; 342 int stack_adjust = ctx->stack_size;
345 int store_offset = stack_adjust - 8; 343 int store_offset = stack_adjust - 8;
344 enum reg_val_type td;
346 int r0 = MIPS_R_V0; 345 int r0 = MIPS_R_V0;
347 346
348 if (dest_reg == MIPS_R_RA && 347 if (dest_reg == MIPS_R_RA) {
349 get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
350 /* Don't let zero extended value escape. */ 348 /* Don't let zero extended value escape. */
351 emit_instr(ctx, sll, r0, r0, 0); 349 td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
350 if (td == REG_64BIT)
351 emit_instr(ctx, sll, r0, r0, 0);
352 }
352 353
353 if (ctx->flags & EBPF_SAVE_RA) { 354 if (ctx->flags & EBPF_SAVE_RA) {
354 emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); 355 emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
692 if (dst < 0) 693 if (dst < 0)
693 return dst; 694 return dst;
694 td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 695 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
695 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 696 if (td == REG_64BIT) {
696 /* sign extend */ 697 /* sign extend */
697 emit_instr(ctx, sll, dst, dst, 0); 698 emit_instr(ctx, sll, dst, dst, 0);
698 } 699 }
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
707 if (dst < 0) 708 if (dst < 0)
708 return dst; 709 return dst;
709 td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 710 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
710 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 711 if (td == REG_64BIT) {
711 /* sign extend */ 712 /* sign extend */
712 emit_instr(ctx, sll, dst, dst, 0); 713 emit_instr(ctx, sll, dst, dst, 0);
713 } 714 }
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
721 if (dst < 0) 722 if (dst < 0)
722 return dst; 723 return dst;
723 td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 724 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
724 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) 725 if (td == REG_64BIT)
725 /* sign extend */ 726 /* sign extend */
726 emit_instr(ctx, sll, dst, dst, 0); 727 emit_instr(ctx, sll, dst, dst, 0);
727 if (insn->imm == 1) { 728 if (insn->imm == 1) {
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
860 if (src < 0 || dst < 0) 861 if (src < 0 || dst < 0)
861 return -EINVAL; 862 return -EINVAL;
862 td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 863 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
863 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 864 if (td == REG_64BIT) {
864 /* sign extend */ 865 /* sign extend */
865 emit_instr(ctx, sll, dst, dst, 0); 866 emit_instr(ctx, sll, dst, dst, 0);
866 } 867 }
867 did_move = false; 868 did_move = false;
868 ts = get_reg_val_type(ctx, this_idx, insn->src_reg); 869 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
869 if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { 870 if (ts == REG_64BIT) {
870 int tmp_reg = MIPS_R_AT; 871 int tmp_reg = MIPS_R_AT;
871 872
872 if (bpf_op == BPF_MOV) { 873 if (bpf_op == BPF_MOV) {
@@ -1254,8 +1255,7 @@ jeq_common:
1254 if (insn->imm == 64 && td == REG_32BIT) 1255 if (insn->imm == 64 && td == REG_32BIT)
1255 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 1256 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
1256 1257
1257 if (insn->imm != 64 && 1258 if (insn->imm != 64 && td == REG_64BIT) {
1258 (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
1259 /* sign extend */ 1259 /* sign extend */
1260 emit_instr(ctx, sll, dst, dst, 0); 1260 emit_instr(ctx, sll, dst, dst, 0);
1261 } 1261 }
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 2582df1c529b..0964c236e3e5 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
308 308
309long do_syscall_trace_enter(struct pt_regs *regs) 309long do_syscall_trace_enter(struct pt_regs *regs)
310{ 310{
311 if (test_thread_flag(TIF_SYSCALL_TRACE) && 311 if (test_thread_flag(TIF_SYSCALL_TRACE)) {
312 tracehook_report_syscall_entry(regs)) { 312 int rc = tracehook_report_syscall_entry(regs);
313
313 /* 314 /*
314 * Tracing decided this syscall should not happen or the 315 * As tracesys_next does not set %r28 to -ENOSYS
315 * debugger stored an invalid system call number. Skip 316 * when %r20 is set to -1, initialize it here.
316 * the system call and the system call restart handling.
317 */ 317 */
318 regs->gr[20] = -1UL; 318 regs->gr[28] = -ENOSYS;
319 goto out; 319
320 if (rc) {
321 /*
322 * A nonzero return code from
323 * tracehook_report_syscall_entry() tells us
324 * to prevent the syscall execution. Skip
325 * the syscall and the syscall restart handling.
326 *
327 * Note that the tracer may also just change
328 * regs->gr[20] to an invalid syscall number,
329 * that is handled by tracesys_next.
330 */
331 regs->gr[20] = -1UL;
332 return -1;
333 }
320 } 334 }
321 335
322 /* Do the secure computing check after ptrace. */ 336 /* Do the secure computing check after ptrace. */
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
340 regs->gr[24] & 0xffffffff, 354 regs->gr[24] & 0xffffffff,
341 regs->gr[23] & 0xffffffff); 355 regs->gr[23] & 0xffffffff);
342 356
343out:
344 /* 357 /*
345 * Sign extend the syscall number to 64bit since it may have been 358 * Sign extend the syscall number to 64bit since it may have been
346 * modified by a compat ptrace call 359 * modified by a compat ptrace call
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index c9bfe526ca9d..d8c8d7c9df15 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud)
904 904
905static inline int pud_present(pud_t pud) 905static inline int pud_present(pud_t pud)
906{ 906{
907 return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); 907 return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
908} 908}
909 909
910extern struct page *pud_page(pud_t pud); 910extern struct page *pud_page(pud_t pud);
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd)
951 951
952static inline int pgd_present(pgd_t pgd) 952static inline int pgd_present(pgd_t pgd)
953{ 953{
954 return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); 954 return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
955} 955}
956 956
957static inline pte_t pgd_pte(pgd_t pgd) 957static inline pte_t pgd_pte(pgd_t pgd)
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 7db3119f8a5b..145373f0e5dc 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
1593 1593
1594 pnv_pci_ioda2_setup_dma_pe(phb, pe); 1594 pnv_pci_ioda2_setup_dma_pe(phb, pe);
1595#ifdef CONFIG_IOMMU_API 1595#ifdef CONFIG_IOMMU_API
1596 iommu_register_group(&pe->table_group,
1597 pe->phb->hose->global_number, pe->pe_number);
1596 pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); 1598 pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
1597#endif 1599#endif
1598 } 1600 }
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 45fb70b4bfa7..ef9448a907c6 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
1147 return 0; 1147 return 0;
1148 1148
1149 pe = &phb->ioda.pe_array[pdn->pe_number]; 1149 pe = &phb->ioda.pe_array[pdn->pe_number];
1150 if (!pe->table_group.group)
1151 return 0;
1150 iommu_add_device(&pe->table_group, dev); 1152 iommu_add_device(&pe->table_group, dev);
1151 return 0; 1153 return 0;
1152 case BUS_NOTIFY_DEL_DEVICE: 1154 case BUS_NOTIFY_DEL_DEVICE:
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index 2fa2942be221..470755cb7558 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -35,6 +35,12 @@
35#define _PAGE_SPECIAL _PAGE_SOFT 35#define _PAGE_SPECIAL _PAGE_SOFT
36#define _PAGE_TABLE _PAGE_PRESENT 36#define _PAGE_TABLE _PAGE_PRESENT
37 37
38/*
39 * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
40 * distinguish them from swapped out pages
41 */
42#define _PAGE_PROT_NONE _PAGE_READ
43
38#define _PAGE_PFN_SHIFT 10 44#define _PAGE_PFN_SHIFT 10
39 45
40/* Set of bits to preserve across pte_modify() */ 46/* Set of bits to preserve across pte_modify() */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 16301966d65b..a8179a8c1491 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -44,7 +44,7 @@
44/* Page protection bits */ 44/* Page protection bits */
45#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) 45#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
46 46
47#define PAGE_NONE __pgprot(0) 47#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
48#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) 48#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
49#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) 49#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
50#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) 50#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
98 98
99static inline int pmd_present(pmd_t pmd) 99static inline int pmd_present(pmd_t pmd)
100{ 100{
101 return (pmd_val(pmd) & _PAGE_PRESENT); 101 return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
102} 102}
103 103
104static inline int pmd_none(pmd_t pmd) 104static inline int pmd_none(pmd_t pmd)
@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
178 178
179static inline int pte_present(pte_t pte) 179static inline int pte_present(pte_t pte)
180{ 180{
181 return (pte_val(pte) & _PAGE_PRESENT); 181 return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
182} 182}
183 183
184static inline int pte_none(pte_t pte) 184static inline int pte_none(pte_t pte)
@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
380 * 380 *
381 * Format of swap PTE: 381 * Format of swap PTE:
382 * bit 0: _PAGE_PRESENT (zero) 382 * bit 0: _PAGE_PRESENT (zero)
383 * bit 1: reserved for future use (zero) 383 * bit 1: _PAGE_PROT_NONE (zero)
384 * bits 2 to 6: swap type 384 * bits 2 to 6: swap type
385 * bits 7 to XLEN-1: swap offset 385 * bits 7 to XLEN-1: swap offset
386 */ 386 */
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 1e1395d63dab..65df1dfdc303 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -18,8 +18,6 @@
18#include <asm/cache.h> 18#include <asm/cache.h>
19#include <asm/thread_info.h> 19#include <asm/thread_info.h>
20 20
21#define MAX_BYTES_PER_LONG 0x10
22
23OUTPUT_ARCH(riscv) 21OUTPUT_ARCH(riscv)
24ENTRY(_start) 22ENTRY(_start)
25 23
@@ -76,6 +74,8 @@ SECTIONS
76 *(.sbss*) 74 *(.sbss*)
77 } 75 }
78 76
77 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
78
79 EXCEPTION_TABLE(0x10) 79 EXCEPTION_TABLE(0x10)
80 NOTES 80 NOTES
81 81
@@ -83,10 +83,6 @@ SECTIONS
83 *(.rel.dyn*) 83 *(.rel.dyn*)
84 } 84 }
85 85
86 BSS_SECTION(MAX_BYTES_PER_LONG,
87 MAX_BYTES_PER_LONG,
88 MAX_BYTES_PER_LONG)
89
90 _end = .; 86 _end = .;
91 87
92 STABS_DEBUG 88 STABS_DEBUG
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 537f97fde37f..b6796e616812 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -30,10 +30,10 @@
30 .section .text 30 .section .text
31ENTRY(swsusp_arch_suspend) 31ENTRY(swsusp_arch_suspend)
32 lg %r1,__LC_NODAT_STACK 32 lg %r1,__LC_NODAT_STACK
33 aghi %r1,-STACK_FRAME_OVERHEAD
34 stmg %r6,%r15,__SF_GPRS(%r1) 33 stmg %r6,%r15,__SF_GPRS(%r1)
34 aghi %r1,-STACK_FRAME_OVERHEAD
35 stg %r15,__SF_BACKCHAIN(%r1) 35 stg %r15,__SF_BACKCHAIN(%r1)
36 lgr %r1,%r15 36 lgr %r15,%r1
37 37
38 /* Store FPU registers */ 38 /* Store FPU registers */
39 brasl %r14,save_fpu_regs 39 brasl %r14,save_fpu_regs
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a153257bf7d9..d62fa148558b 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
297 scb_s->crycbd = 0; 297 scb_s->crycbd = 0;
298 298
299 apie_h = vcpu->arch.sie_block->eca & ECA_APIE; 299 apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
300 if (!apie_h && !key_msk) 300 if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
301 return 0; 301 return 0;
302 302
303 if (!crycb_addr) 303 if (!crycb_addr)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index a966d7bfac57..4266a4de3160 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq)
382 if (ai == -1UL) 382 if (ai == -1UL)
383 break; 383 break;
384 inc_irq_stat(IRQIO_MSI); 384 inc_irq_stat(IRQIO_MSI);
385 airq_iv_lock(aibv, ai);
385 generic_handle_irq(airq_iv_get_data(aibv, ai)); 386 generic_handle_irq(airq_iv_get_data(aibv, ai));
387 airq_iv_unlock(aibv, ai);
386 } 388 }
387 } 389 }
388} 390}
@@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
408 zdev->aisb = aisb; 410 zdev->aisb = aisb;
409 411
410 /* Create adapter interrupt vector */ 412 /* Create adapter interrupt vector */
411 zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA); 413 zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
412 if (!zdev->aibv) 414 if (!zdev->aibv)
413 return -ENOMEM; 415 return -ENOMEM;
414 416
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile
index 01d0f7fb14cc..2563d1e532e2 100644
--- a/arch/sh/boot/dts/Makefile
+++ b/arch/sh/boot/dts/Makefile
@@ -1,3 +1,3 @@
1ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") 1ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
2obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o 2obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
3endif 3endif
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 374a19712e20..b684f0294f35 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
2278 x86_pmu.check_microcode(); 2278 x86_pmu.check_microcode();
2279} 2279}
2280 2280
2281static int x86_pmu_check_period(struct perf_event *event, u64 value)
2282{
2283 if (x86_pmu.check_period && x86_pmu.check_period(event, value))
2284 return -EINVAL;
2285
2286 if (value && x86_pmu.limit_period) {
2287 if (x86_pmu.limit_period(event, value) > value)
2288 return -EINVAL;
2289 }
2290
2291 return 0;
2292}
2293
2281static struct pmu pmu = { 2294static struct pmu pmu = {
2282 .pmu_enable = x86_pmu_enable, 2295 .pmu_enable = x86_pmu_enable,
2283 .pmu_disable = x86_pmu_disable, 2296 .pmu_disable = x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
2302 .event_idx = x86_pmu_event_idx, 2315 .event_idx = x86_pmu_event_idx,
2303 .sched_task = x86_pmu_sched_task, 2316 .sched_task = x86_pmu_sched_task,
2304 .task_ctx_size = sizeof(struct x86_perf_task_context), 2317 .task_ctx_size = sizeof(struct x86_perf_task_context),
2318 .check_period = x86_pmu_check_period,
2305}; 2319};
2306 2320
2307void arch_perf_update_userpage(struct perf_event *event, 2321void arch_perf_update_userpage(struct perf_event *event,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index daafb893449b..730978dff63f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
3587 intel_pmu_lbr_sched_task(ctx, sched_in); 3587 intel_pmu_lbr_sched_task(ctx, sched_in);
3588} 3588}
3589 3589
3590static int intel_pmu_check_period(struct perf_event *event, u64 value)
3591{
3592 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3593}
3594
3590PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 3595PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3591 3596
3592PMU_FORMAT_ATTR(ldlat, "config1:0-15"); 3597PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = {
3667 .cpu_starting = intel_pmu_cpu_starting, 3672 .cpu_starting = intel_pmu_cpu_starting,
3668 .cpu_dying = intel_pmu_cpu_dying, 3673 .cpu_dying = intel_pmu_cpu_dying,
3669 .cpu_dead = intel_pmu_cpu_dead, 3674 .cpu_dead = intel_pmu_cpu_dead,
3675
3676 .check_period = intel_pmu_check_period,
3670}; 3677};
3671 3678
3672static struct attribute *intel_pmu_attrs[]; 3679static struct attribute *intel_pmu_attrs[];
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = {
3711 3718
3712 .guest_get_msrs = intel_guest_get_msrs, 3719 .guest_get_msrs = intel_guest_get_msrs,
3713 .sched_task = intel_pmu_sched_task, 3720 .sched_task = intel_pmu_sched_task,
3721
3722 .check_period = intel_pmu_check_period,
3714}; 3723};
3715 3724
3716static __init void intel_clovertown_quirk(void) 3725static __init void intel_clovertown_quirk(void)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 78d7b7031bfc..d46fd6754d92 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -646,6 +646,11 @@ struct x86_pmu {
646 * Intel host/guest support (KVM) 646 * Intel host/guest support (KVM)
647 */ 647 */
648 struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); 648 struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
649
650 /*
651 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
652 */
653 int (*check_period) (struct perf_event *event, u64 period);
649}; 654};
650 655
651struct x86_perf_task_context { 656struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
857 862
858#ifdef CONFIG_CPU_SUP_INTEL 863#ifdef CONFIG_CPU_SUP_INTEL
859 864
860static inline bool intel_pmu_has_bts(struct perf_event *event) 865static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
861{ 866{
862 struct hw_perf_event *hwc = &event->hw; 867 struct hw_perf_event *hwc = &event->hw;
863 unsigned int hw_event, bts_event; 868 unsigned int hw_event, bts_event;
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
868 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; 873 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
869 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 874 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
870 875
871 return hw_event == bts_event && hwc->sample_period == 1; 876 return hw_event == bts_event && period == 1;
877}
878
879static inline bool intel_pmu_has_bts(struct perf_event *event)
880{
881 struct hw_perf_event *hwc = &event->hw;
882
883 return intel_pmu_has_bts_period(event, hwc->sample_period);
872} 884}
873 885
874int intel_pmu_save_and_restart(struct perf_event *event); 886int intel_pmu_save_and_restart(struct perf_event *event);
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index f65b78d32f5e..7dbbe9ffda17 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
51/* 51/*
52 * fill in the user structure for a core dump.. 52 * fill in the user structure for a core dump..
53 */ 53 */
54static void dump_thread32(struct pt_regs *regs, struct user32 *dump) 54static void fill_dump(struct pt_regs *regs, struct user32 *dump)
55{ 55{
56 u32 fs, gs; 56 u32 fs, gs;
57 memset(dump, 0, sizeof(*dump)); 57 memset(dump, 0, sizeof(*dump));
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
157 fs = get_fs(); 157 fs = get_fs();
158 set_fs(KERNEL_DS); 158 set_fs(KERNEL_DS);
159 has_dumped = 1; 159 has_dumped = 1;
160
161 fill_dump(cprm->regs, &dump);
162
160 strncpy(dump.u_comm, current->comm, sizeof(current->comm)); 163 strncpy(dump.u_comm, current->comm, sizeof(current->comm));
161 dump.u_ar0 = offsetof(struct user32, regs); 164 dump.u_ar0 = offsetof(struct user32, regs);
162 dump.signal = cprm->siginfo->si_signo; 165 dump.signal = cprm->siginfo->si_signo;
163 dump_thread32(cprm->regs, &dump);
164 166
165 /* 167 /*
166 * If the size of the dump file exceeds the rlimit, then see 168 * If the size of the dump file exceeds the rlimit, then see
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index d9a9993af882..9f15384c504a 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -52,6 +52,8 @@
52 52
53#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 53#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
54 54
55#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
56
55/* "Small Core" Processors (Atom) */ 57/* "Small Core" Processors (Atom) */
56 58
57#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ 59#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4660ce90de7f..180373360e34 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
299 unsigned int cr4_smap:1; 299 unsigned int cr4_smap:1;
300 unsigned int cr4_smep:1; 300 unsigned int cr4_smep:1;
301 unsigned int cr4_la57:1; 301 unsigned int cr4_la57:1;
302 unsigned int maxphyaddr:6;
302 }; 303 };
303}; 304};
304 305
@@ -397,6 +398,7 @@ struct kvm_mmu {
397 void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 398 void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
398 u64 *spte, const void *pte); 399 u64 *spte, const void *pte);
399 hpa_t root_hpa; 400 hpa_t root_hpa;
401 gpa_t root_cr3;
400 union kvm_mmu_role mmu_role; 402 union kvm_mmu_role mmu_role;
401 u8 root_level; 403 u8 root_level;
402 u8 shadow_root_level; 404 u8 shadow_root_level;
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index e652a7cc6186..3f697a9e3f59 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -48,7 +48,8 @@ enum {
48 BIOS_STATUS_SUCCESS = 0, 48 BIOS_STATUS_SUCCESS = 0,
49 BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, 49 BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
50 BIOS_STATUS_EINVAL = -EINVAL, 50 BIOS_STATUS_EINVAL = -EINVAL,
51 BIOS_STATUS_UNAVAIL = -EBUSY 51 BIOS_STATUS_UNAVAIL = -EBUSY,
52 BIOS_STATUS_ABORT = -EINTR,
52}; 53};
53 54
54/* Address map parameters */ 55/* Address map parameters */
@@ -167,4 +168,9 @@ extern long system_serial_number;
167 168
168extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ 169extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
169 170
171/*
172 * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
173 */
174extern struct semaphore __efi_uv_runtime_lock;
175
170#endif /* _ASM_X86_UV_BIOS_H */ 176#endif /* _ASM_X86_UV_BIOS_H */
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index bbffa6c54697..c07958b59f50 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
335 unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; 335 unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
336 unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; 336 unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
337 unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0; 337 unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
338 unsigned f_la57 = 0;
338 339
339 /* cpuid 1.edx */ 340 /* cpuid 1.edx */
340 const u32 kvm_cpuid_1_edx_x86_features = 341 const u32 kvm_cpuid_1_edx_x86_features =
@@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
489 // TSC_ADJUST is emulated 490 // TSC_ADJUST is emulated
490 entry->ebx |= F(TSC_ADJUST); 491 entry->ebx |= F(TSC_ADJUST);
491 entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; 492 entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
493 f_la57 = entry->ecx & F(LA57);
492 cpuid_mask(&entry->ecx, CPUID_7_ECX); 494 cpuid_mask(&entry->ecx, CPUID_7_ECX);
495 /* Set LA57 based on hardware capability. */
496 entry->ecx |= f_la57;
493 entry->ecx |= f_umip; 497 entry->ecx |= f_umip;
494 /* PKU is not yet implemented for shadow paging. */ 498 /* PKU is not yet implemented for shadow paging. */
495 if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) 499 if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
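The LA57 hunk above preserves a hardware CPUID bit that cpuid_mask() would otherwise drop when the host kernel is not using 5-level paging. A minimal user-space sketch of that save/mask/restore pattern (the mask value and demo inputs are illustrative, not KVM's real feature tables):

#include <stdint.h>
#include <stdio.h>

#define F_LA57 (1u << 16)    /* CPUID.(EAX=7,ECX=0):ECX bit 16 */

static uint32_t apply_mask_keep_la57(uint32_t hw_ecx, uint32_t supported_mask)
{
        uint32_t la57 = hw_ecx & F_LA57;   /* remember what the hardware reports */

        hw_ecx &= supported_mask;          /* strip bits the hypervisor does not expose */
        hw_ecx |= la57;                    /* re-add LA57 based on hardware capability */
        return hw_ecx;
}

int main(void)
{
        /* Hardware advertises LA57, but the supported mask omits it. */
        uint32_t ecx = apply_mask_keep_la57(0x4u | F_LA57, 0x4u);

        printf("guest ecx = %#x\n", ecx);  /* LA57 survives the masking */
        return 0;
}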
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index da9c42349b1f..f2d1d230d5b8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3555 &invalid_list); 3555 &invalid_list);
3556 mmu->root_hpa = INVALID_PAGE; 3556 mmu->root_hpa = INVALID_PAGE;
3557 } 3557 }
3558 mmu->root_cr3 = 0;
3558 } 3559 }
3559 3560
3560 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 3561 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3610 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); 3611 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
3611 } else 3612 } else
3612 BUG(); 3613 BUG();
3614 vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
3613 3615
3614 return 0; 3616 return 0;
3615} 3617}
@@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3618{ 3620{
3619 struct kvm_mmu_page *sp; 3621 struct kvm_mmu_page *sp;
3620 u64 pdptr, pm_mask; 3622 u64 pdptr, pm_mask;
3621 gfn_t root_gfn; 3623 gfn_t root_gfn, root_cr3;
3622 int i; 3624 int i;
3623 3625
3624 root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; 3626 root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
3627 root_gfn = root_cr3 >> PAGE_SHIFT;
3625 3628
3626 if (mmu_check_root(vcpu, root_gfn)) 3629 if (mmu_check_root(vcpu, root_gfn))
3627 return 1; 3630 return 1;
@@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3646 ++sp->root_count; 3649 ++sp->root_count;
3647 spin_unlock(&vcpu->kvm->mmu_lock); 3650 spin_unlock(&vcpu->kvm->mmu_lock);
3648 vcpu->arch.mmu->root_hpa = root; 3651 vcpu->arch.mmu->root_hpa = root;
3649 return 0; 3652 goto set_root_cr3;
3650 } 3653 }
3651 3654
3652 /* 3655 /*
@@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3712 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); 3715 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
3713 } 3716 }
3714 3717
3718set_root_cr3:
3719 vcpu->arch.mmu->root_cr3 = root_cr3;
3720
3715 return 0; 3721 return 0;
3716} 3722}
3717 3723
@@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
4163 struct kvm_mmu_root_info root; 4169 struct kvm_mmu_root_info root;
4164 struct kvm_mmu *mmu = vcpu->arch.mmu; 4170 struct kvm_mmu *mmu = vcpu->arch.mmu;
4165 4171
4166 root.cr3 = mmu->get_cr3(vcpu); 4172 root.cr3 = mmu->root_cr3;
4167 root.hpa = mmu->root_hpa; 4173 root.hpa = mmu->root_hpa;
4168 4174
4169 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 4175 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
@@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
4176 } 4182 }
4177 4183
4178 mmu->root_hpa = root.hpa; 4184 mmu->root_hpa = root.hpa;
4185 mmu->root_cr3 = root.cr3;
4179 4186
4180 return i < KVM_MMU_NUM_PREV_ROOTS; 4187 return i < KVM_MMU_NUM_PREV_ROOTS;
4181} 4188}
@@ -4770,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
4770 ext.cr4_pse = !!is_pse(vcpu); 4777 ext.cr4_pse = !!is_pse(vcpu);
4771 ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); 4778 ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
4772 ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); 4779 ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
4780 ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
4773 4781
4774 ext.valid = 1; 4782 ext.valid = 1;
4775 4783
@@ -5516,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
5516 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; 5524 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5517 5525
5518 vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; 5526 vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
5527 vcpu->arch.root_mmu.root_cr3 = 0;
5519 vcpu->arch.root_mmu.translate_gpa = translate_gpa; 5528 vcpu->arch.root_mmu.translate_gpa = translate_gpa;
5520 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) 5529 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5521 vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; 5530 vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5522 5531
5523 vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; 5532 vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
5533 vcpu->arch.guest_mmu.root_cr3 = 0;
5524 vcpu->arch.guest_mmu.translate_gpa = translate_gpa; 5534 vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
5525 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) 5535 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5526 vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; 5536 vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d8ea4ebd79e7..d737a51a53ca 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2473,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2473 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2473 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2474 return -EINVAL; 2474 return -EINVAL;
2475 2475
2476 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2477 nested_cpu_has_save_preemption_timer(vmcs12))
2478 return -EINVAL;
2479
2476 if (nested_cpu_has_ept(vmcs12) && 2480 if (nested_cpu_has_ept(vmcs12) &&
2477 !valid_ept_address(vcpu, vmcs12->ept_pointer)) 2481 !valid_ept_address(vcpu, vmcs12->ept_pointer))
2478 return -EINVAL; 2482 return -EINVAL;
@@ -5557,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
5557 * secondary cpu-based controls. Do not include those that 5561 * secondary cpu-based controls. Do not include those that
5558 * depend on CPUID bits, they are added later by vmx_cpuid_update. 5562 * depend on CPUID bits, they are added later by vmx_cpuid_update.
5559 */ 5563 */
5560 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 5564 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
5561 msrs->secondary_ctls_low, 5565 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
5562 msrs->secondary_ctls_high); 5566 msrs->secondary_ctls_low,
5567 msrs->secondary_ctls_high);
5568
5563 msrs->secondary_ctls_low = 0; 5569 msrs->secondary_ctls_low = 0;
5564 msrs->secondary_ctls_high &= 5570 msrs->secondary_ctls_high &=
5565 SECONDARY_EXEC_DESC | 5571 SECONDARY_EXEC_DESC |
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 95d618045001..30a6bcd735ec 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -863,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
863 if (!entry_only) 863 if (!entry_only)
864 j = find_msr(&m->host, msr); 864 j = find_msr(&m->host, msr);
865 865
866 if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { 866 if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
867 (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
867 printk_once(KERN_WARNING "Not enough msr switch entries. " 868 printk_once(KERN_WARNING "Not enough msr switch entries. "
868 "Can't add msr %x\n", msr); 869 "Can't add msr %x\n", msr);
869 return; 870 return;
@@ -1193,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
1193 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) 1194 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
1194 return; 1195 return;
1195 1196
1196 /*
1197 * First handle the simple case where no cmpxchg is necessary; just
1198 * allow posting non-urgent interrupts.
1199 *
1200 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
1201 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
1202 * expects the VCPU to be on the blocked_vcpu_list that matches
1203 * PI.NDST.
1204 */
1205 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
1206 vcpu->cpu == cpu) {
1207 pi_clear_sn(pi_desc);
1208 return;
1209 }
1210
1211 /* The full case. */ 1197 /* The full case. */
1212 do { 1198 do {
1213 old.control = new.control = pi_desc->control; 1199 old.control = new.control = pi_desc->control;
@@ -1222,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
1222 new.sn = 0; 1208 new.sn = 0;
1223 } while (cmpxchg64(&pi_desc->control, old.control, 1209 } while (cmpxchg64(&pi_desc->control, old.control,
1224 new.control) != old.control); 1210 new.control) != old.control);
1211
1212 /*
1213 * Clear SN before reading the bitmap. The VT-d firmware
1214 * writes the bitmap and reads SN atomically (5.2.3 in the
1215 * spec), so it doesn't really have a memory barrier that
1216 * pairs with this, but we cannot do that and we need one.
1217 */
1218 smp_mb__after_atomic();
1219
1220 if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
1221 pi_set_on(pi_desc);
1225} 1222}
1226 1223
1227/* 1224/*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 99328954c2fc..0ac0a64c7790 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
337 return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); 337 return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
338} 338}
339 339
340static inline void pi_clear_sn(struct pi_desc *pi_desc) 340static inline void pi_set_sn(struct pi_desc *pi_desc)
341{ 341{
342 return clear_bit(POSTED_INTR_SN, 342 return set_bit(POSTED_INTR_SN,
343 (unsigned long *)&pi_desc->control); 343 (unsigned long *)&pi_desc->control);
344} 344}
345 345
346static inline void pi_set_sn(struct pi_desc *pi_desc) 346static inline void pi_set_on(struct pi_desc *pi_desc)
347{ 347{
348 return set_bit(POSTED_INTR_SN, 348 set_bit(POSTED_INTR_ON,
349 (unsigned long *)&pi_desc->control); 349 (unsigned long *)&pi_desc->control);
350} 350}
351 351
352static inline void pi_clear_on(struct pi_desc *pi_desc) 352static inline void pi_clear_on(struct pi_desc *pi_desc)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e67ecf25e690..941f932373d0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7801,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
7801 * 1) We should set ->mode before checking ->requests. Please see 7801 * 1) We should set ->mode before checking ->requests. Please see
7802 * the comment in kvm_vcpu_exiting_guest_mode(). 7802 * the comment in kvm_vcpu_exiting_guest_mode().
7803 * 7803 *
7804 * 2) For APICv, we should set ->mode before checking PIR.ON. This 7804 * 2) For APICv, we should set ->mode before checking PID.ON. This
7805 * pairs with the memory barrier implicit in pi_test_and_set_on 7805 * pairs with the memory barrier implicit in pi_test_and_set_on
7806 * (see vmx_deliver_posted_interrupt). 7806 * (see vmx_deliver_posted_interrupt).
7807 * 7807 *
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 4a6a5a26c582..eb33432f2f24 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -29,7 +29,8 @@
29 29
30struct uv_systab *uv_systab; 30struct uv_systab *uv_systab;
31 31
32s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) 32static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
33 u64 a4, u64 a5)
33{ 34{
34 struct uv_systab *tab = uv_systab; 35 struct uv_systab *tab = uv_systab;
35 s64 ret; 36 s64 ret;
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
51 52
52 return ret; 53 return ret;
53} 54}
55
56s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
57{
58 s64 ret;
59
60 if (down_interruptible(&__efi_uv_runtime_lock))
61 return BIOS_STATUS_ABORT;
62
63 ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
64 up(&__efi_uv_runtime_lock);
65
66 return ret;
67}
54EXPORT_SYMBOL_GPL(uv_bios_call); 68EXPORT_SYMBOL_GPL(uv_bios_call);
55 69
56s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, 70s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
59 unsigned long bios_flags; 73 unsigned long bios_flags;
60 s64 ret; 74 s64 ret;
61 75
76 if (down_interruptible(&__efi_uv_runtime_lock))
77 return BIOS_STATUS_ABORT;
78
62 local_irq_save(bios_flags); 79 local_irq_save(bios_flags);
63 ret = uv_bios_call(which, a1, a2, a3, a4, a5); 80 ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
64 local_irq_restore(bios_flags); 81 local_irq_restore(bios_flags);
65 82
83 up(&__efi_uv_runtime_lock);
84
66 return ret; 85 return ret;
67} 86}
68 87
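The bios_uv.c change above is a locked-wrapper split: the exported uv_bios_call() now takes the shared EFI runtime semaphore and delegates to an unlocked __uv_bios_call(), so the irqsave variant can acquire the (sleeping) lock before disabling interrupts. A rough user-space analogue of that structure, with a pthread mutex standing in for the semaphore and a dummy worker in place of the firmware call (names and error mapping are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define BIOS_STATUS_ABORT (-EINTR)

static pthread_mutex_t efi_runtime_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked worker, analogous to __uv_bios_call(): callers must hold the lock. */
static long __bios_call(int which)
{
        return which;   /* stand-in for the real firmware call */
}

/* Exported entry point: serialize against other EFI runtime users first. */
static long bios_call(int which)
{
        long ret;

        if (pthread_mutex_lock(&efi_runtime_lock))
                return BIOS_STATUS_ABORT;

        ret = __bios_call(which);
        pthread_mutex_unlock(&efi_runtime_lock);

        return ret;
}

int main(void)
{
        printf("%ld\n", bios_call(3));
        return 0;
}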
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8f5b533764ca..9437a5eb07cf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -737,12 +737,20 @@ static void blk_mq_requeue_work(struct work_struct *work)
737 spin_unlock_irq(&q->requeue_lock); 737 spin_unlock_irq(&q->requeue_lock);
738 738
739 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 739 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
740 if (!(rq->rq_flags & RQF_SOFTBARRIER)) 740 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
741 continue; 741 continue;
742 742
743 rq->rq_flags &= ~RQF_SOFTBARRIER; 743 rq->rq_flags &= ~RQF_SOFTBARRIER;
744 list_del_init(&rq->queuelist); 744 list_del_init(&rq->queuelist);
745 blk_mq_sched_insert_request(rq, true, false, false); 745 /*
746 * If RQF_DONTPREP, rq has contained some driver specific
747 * data, so insert it to hctx dispatch list to avoid any
748 * merge.
749 */
750 if (rq->rq_flags & RQF_DONTPREP)
751 blk_mq_request_bypass_insert(rq, false);
752 else
753 blk_mq_sched_insert_request(rq, true, false, false);
746 } 754 }
747 755
748 while (!list_empty(&rq_list)) { 756 while (!list_empty(&rq_list)) {
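The blk-mq hunk above changes where requeued requests are reinserted: a request still carrying driver-private data (RQF_DONTPREP) must not be merged, so it bypasses the I/O scheduler and goes straight to the hctx dispatch list. A toy decision function mirroring that routing (flag values and return strings are illustrative only):

#include <stdio.h>

#define RQF_SOFTBARRIER (1u << 0)
#define RQF_DONTPREP    (1u << 1)

static const char *requeue_target(unsigned int rq_flags)
{
        if (!(rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
                return "leave on requeue list";
        if (rq_flags & RQF_DONTPREP)
                return "hctx dispatch list (no merging)";
        return "I/O scheduler insert";
}

int main(void)
{
        printf("%s\n", requeue_target(RQF_DONTPREP));     /* bypass insert */
        printf("%s\n", requeue_target(RQF_SOFTBARRIER));  /* scheduler insert */
        return 0;
}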
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 17eb09d222ff..ec78a04eb136 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
122 122
123int af_alg_release(struct socket *sock) 123int af_alg_release(struct socket *sock)
124{ 124{
125 if (sock->sk) 125 if (sock->sk) {
126 sock_put(sock->sk); 126 sock_put(sock->sk);
127 sock->sk = NULL;
128 }
127 return 0; 129 return 0;
128} 130}
129EXPORT_SYMBOL_GPL(af_alg_release); 131EXPORT_SYMBOL_GPL(af_alg_release);
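The af_alg_release() fix above clears sock->sk after dropping the reference, so a second release of the same socket becomes a no-op rather than a double put. A toy refcount analogue of that pattern (simplified structures, not the real socket layer):

#include <stdio.h>
#include <stdlib.h>

struct sock { int refs; };

static void sock_put(struct sock *sk)
{
        if (--sk->refs == 0)
                free(sk);
}

struct socket { struct sock *sk; };

static int alg_release(struct socket *sock)
{
        if (sock->sk) {
                sock_put(sock->sk);
                sock->sk = NULL;        /* make a repeated release harmless */
        }
        return 0;
}

int main(void)
{
        struct socket s = { .sk = calloc(1, sizeof(struct sock)) };

        s.sk->refs = 1;
        alg_release(&s);
        alg_release(&s);        /* safe: sk already NULL */
        printf("ok\n");
        return 0;
}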
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a43276c76fc6..21393ec3b9a4 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client)
509 struct ht16k33_priv *priv = i2c_get_clientdata(client); 509 struct ht16k33_priv *priv = i2c_get_clientdata(client);
510 struct ht16k33_fbdev *fbdev = &priv->fbdev; 510 struct ht16k33_fbdev *fbdev = &priv->fbdev;
511 511
512 cancel_delayed_work(&fbdev->work); 512 cancel_delayed_work_sync(&fbdev->work);
513 unregister_framebuffer(fbdev->info); 513 unregister_framebuffer(fbdev->info);
514 framebuffer_release(fbdev->info); 514 framebuffer_release(fbdev->info);
515 free_page((unsigned long) fbdev->buffer); 515 free_page((unsigned long) fbdev->buffer);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0ea2139c50d8..ccd296dbb95c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -95,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
95static void pm_runtime_deactivate_timer(struct device *dev) 95static void pm_runtime_deactivate_timer(struct device *dev)
96{ 96{
97 if (dev->power.timer_expires > 0) { 97 if (dev->power.timer_expires > 0) {
98 hrtimer_cancel(&dev->power.suspend_timer); 98 hrtimer_try_to_cancel(&dev->power.suspend_timer);
99 dev->power.timer_expires = 0; 99 dev->power.timer_expires = 0;
100 } 100 }
101} 101}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 6f2856c6d0f2..55481b40df9a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
4075 4075
4076 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { 4076 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
4077 if (lock_fdc(drive)) 4077 if (lock_fdc(drive))
4078 return -EINTR; 4078 return 0;
4079 poll_drive(false, 0); 4079 poll_drive(false, 0);
4080 process_fd_request(); 4080 process_fd_request();
4081 } 4081 }
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f94d33525771..d299ec79e4c3 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
781 SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, 781 SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
782 SYSC_QUIRK_LEGACY_IDLE), 782 SYSC_QUIRK_LEGACY_IDLE),
783 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 783 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
784 SYSC_QUIRK_LEGACY_IDLE), 784 0),
785 /* Some timers on omap4 and later */ 785 /* Some timers on omap4 and later */
786 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, 786 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
787 SYSC_QUIRK_LEGACY_IDLE), 787 0),
788 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, 788 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
789 SYSC_QUIRK_LEGACY_IDLE), 789 0),
790 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, 790 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
791 SYSC_QUIRK_LEGACY_IDLE), 791 SYSC_QUIRK_LEGACY_IDLE),
792 /* Uarts on omap4 and later */ 792 /* Uarts on omap4 and later */
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index d2f0bb5ba47e..e705aab9e38b 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -290,6 +290,12 @@ config COMMON_CLK_BD718XX
290 This driver supports ROHM BD71837 and ROHM BD71847 290 This driver supports ROHM BD71837 and ROHM BD71847
291 PMICs clock gates. 291 PMICs clock gates.
292 292
293config COMMON_CLK_FIXED_MMIO
294 bool "Clock driver for Memory Mapped Fixed values"
295 depends on COMMON_CLK && OF
296 help
297 Support for Memory Mapped IO Fixed clocks
298
293source "drivers/clk/actions/Kconfig" 299source "drivers/clk/actions/Kconfig"
294source "drivers/clk/bcm/Kconfig" 300source "drivers/clk/bcm/Kconfig"
295source "drivers/clk/hisilicon/Kconfig" 301source "drivers/clk/hisilicon/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 8a9440a97500..1db133652f0c 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
27obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o 27obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
28obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o 28obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
29obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o 29obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
30obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
30obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o 31obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
31obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o 32obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
32obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o 33obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
@@ -78,7 +79,7 @@ obj-$(CONFIG_ARCH_K3) += keystone/
78obj-$(CONFIG_ARCH_KEYSTONE) += keystone/ 79obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
79obj-$(CONFIG_MACH_LOONGSON32) += loongson1/ 80obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
80obj-y += mediatek/ 81obj-y += mediatek/
81obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/ 82obj-$(CONFIG_ARCH_MESON) += meson/
82obj-$(CONFIG_MACH_PIC32) += microchip/ 83obj-$(CONFIG_MACH_PIC32) += microchip/
83ifeq ($(CONFIG_COMMON_CLK), y) 84ifeq ($(CONFIG_COMMON_CLK), y)
84obj-$(CONFIG_ARCH_MMP) += mmp/ 85obj-$(CONFIG_ARCH_MMP) += mmp/
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 2fe225a697df..3487e03d4bc6 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
144 return; 144 return;
145 145
146 at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, 146 at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
147 nck(at91sam9x5_systemck), 147 nck(at91sam9x5_systemck), 31, 0);
148 nck(at91sam9x35_periphck), 0);
149 if (!at91sam9x5_pmc) 148 if (!at91sam9x5_pmc)
150 return; 149 return;
151 150
@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
210 parent_names[1] = "mainck"; 209 parent_names[1] = "mainck";
211 parent_names[2] = "plladivck"; 210 parent_names[2] = "plladivck";
212 parent_names[3] = "utmick"; 211 parent_names[3] = "utmick";
213 parent_names[4] = "mck"; 212 parent_names[4] = "masterck";
214 for (i = 0; i < 2; i++) { 213 for (i = 0; i < 2; i++) {
215 char name[6]; 214 char name[6];
216 215
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index d69ad96fe988..cd0ef7274fdb 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
240 parent_names[1] = "mainck"; 240 parent_names[1] = "mainck";
241 parent_names[2] = "plladivck"; 241 parent_names[2] = "plladivck";
242 parent_names[3] = "utmick"; 242 parent_names[3] = "utmick";
243 parent_names[4] = "mck"; 243 parent_names[4] = "masterck";
244 for (i = 0; i < 3; i++) { 244 for (i = 0; i < 3; i++) {
245 char name[6]; 245 char name[6];
246 246
@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
291 parent_names[1] = "mainck"; 291 parent_names[1] = "mainck";
292 parent_names[2] = "plladivck"; 292 parent_names[2] = "plladivck";
293 parent_names[3] = "utmick"; 293 parent_names[3] = "utmick";
294 parent_names[4] = "mck"; 294 parent_names[4] = "masterck";
295 parent_names[5] = "audiopll_pmcck"; 295 parent_names[5] = "audiopll_pmcck";
296 for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) { 296 for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
297 hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, 297 hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index e358be7f6c8d..b645a9d59cdb 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
207 parent_names[1] = "mainck"; 207 parent_names[1] = "mainck";
208 parent_names[2] = "plladivck"; 208 parent_names[2] = "plladivck";
209 parent_names[3] = "utmick"; 209 parent_names[3] = "utmick";
210 parent_names[4] = "mck"; 210 parent_names[4] = "masterck";
211 for (i = 0; i < 3; i++) { 211 for (i = 0; i < 3; i++) {
212 char name[6]; 212 char name[6];
213 213
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index 2c04396402ab..c36c47bdba02 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -44,21 +44,21 @@ struct clps711x_clk {
44 struct clk_hw_onecell_data clk_data; 44 struct clk_hw_onecell_data clk_data;
45}; 45};
46 46
47static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base, 47static void __init clps711x_clk_init_dt(struct device_node *np)
48 u32 fref)
49{ 48{
50 u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi; 49 u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi, fref = 0;
51 struct clps711x_clk *clps711x_clk; 50 struct clps711x_clk *clps711x_clk;
52 unsigned i; 51 void __iomem *base;
52
53 WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
53 54
54 if (!base) 55 base = of_iomap(np, 0);
55 return ERR_PTR(-ENOMEM); 56 BUG_ON(!base);
56 57
57 clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws, 58 clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws,
58 CLPS711X_CLK_MAX), 59 CLPS711X_CLK_MAX),
59 GFP_KERNEL); 60 GFP_KERNEL);
60 if (!clps711x_clk) 61 BUG_ON(!clps711x_clk);
61 return ERR_PTR(-ENOMEM);
62 62
63 spin_lock_init(&clps711x_clk->lock); 63 spin_lock_init(&clps711x_clk->lock);
64 64
@@ -137,52 +137,13 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
137 clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10); 137 clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
138 clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] = 138 clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] =
139 clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64); 139 clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64);
140 for (i = 0; i < CLPS711X_CLK_MAX; i++) 140 for (tmp = 0; tmp < CLPS711X_CLK_MAX; tmp++)
141 if (IS_ERR(clps711x_clk->clk_data.hws[i])) 141 if (IS_ERR(clps711x_clk->clk_data.hws[tmp]))
142 pr_err("clk %i: register failed with %ld\n", 142 pr_err("clk %i: register failed with %ld\n",
143 i, PTR_ERR(clps711x_clk->clk_data.hws[i])); 143 tmp, PTR_ERR(clps711x_clk->clk_data.hws[tmp]));
144
145 return clps711x_clk;
146}
147
148void __init clps711x_clk_init(void __iomem *base)
149{
150 struct clps711x_clk *clps711x_clk;
151
152 clps711x_clk = _clps711x_clk_init(base, 73728000);
153
154 BUG_ON(IS_ERR(clps711x_clk));
155
156 /* Clocksource */
157 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1],
158 NULL, "clps711x-timer.0");
159 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2],
160 NULL, "clps711x-timer.1");
161
162 /* Drivers */
163 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM],
164 NULL, "clps711x-pwm");
165 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
166 NULL, "clps711x-uart.0");
167 clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
168 NULL, "clps711x-uart.1");
169}
170
171#ifdef CONFIG_OF
172static void __init clps711x_clk_init_dt(struct device_node *np)
173{
174 void __iomem *base = of_iomap(np, 0);
175 struct clps711x_clk *clps711x_clk;
176 u32 fref = 0;
177
178 WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
179
180 clps711x_clk = _clps711x_clk_init(base, fref);
181 BUG_ON(IS_ERR(clps711x_clk));
182 144
183 clps711x_clk->clk_data.num = CLPS711X_CLK_MAX; 145 clps711x_clk->clk_data.num = CLPS711X_CLK_MAX;
184 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, 146 of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
185 &clps711x_clk->clk_data); 147 &clps711x_clk->clk_data);
186} 148}
187CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt); 149CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt);
188#endif
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index c9a86156ced8..daa1fc8fba53 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -29,6 +29,17 @@ struct clk *devm_clk_get(struct device *dev, const char *id)
29} 29}
30EXPORT_SYMBOL(devm_clk_get); 30EXPORT_SYMBOL(devm_clk_get);
31 31
32struct clk *devm_clk_get_optional(struct device *dev, const char *id)
33{
34 struct clk *clk = devm_clk_get(dev, id);
35
36 if (clk == ERR_PTR(-ENOENT))
37 return NULL;
38
39 return clk;
40}
41EXPORT_SYMBOL(devm_clk_get_optional);
42
32struct clk_bulk_devres { 43struct clk_bulk_devres {
33 struct clk_bulk_data *clks; 44 struct clk_bulk_data *clks;
34 int num_clks; 45 int num_clks;
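The devm_clk_get_optional() helper added above maps a missing clock (-ENOENT) to NULL while letting every other error propagate. A hedged consumer sketch of how a driver would use it (kernel context assumed, not a standalone program; the "bus" clock name and probe function are hypothetical):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct clk *bus_clk;

        /* Absent optional clock -> NULL; real failures still abort the probe. */
        bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
        if (IS_ERR(bus_clk))
                return PTR_ERR(bus_clk);

        /* clk_prepare_enable() treats a NULL clk as a no-op, so no special case. */
        return clk_prepare_enable(bus_clk);
}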
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
new file mode 100644
index 000000000000..d1a97d971183
--- /dev/null
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -0,0 +1,101 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Memory Mapped IO Fixed clock driver
5 *
6 * Copyright (C) 2018 Cadence Design Systems, Inc.
7 *
8 * Authors:
9 * Jan Kotas <jank@cadence.com>
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/of_address.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16
17static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
18{
19 struct clk_hw *clk;
20 const char *clk_name = node->name;
21 void __iomem *base;
22 u32 freq;
23 int ret;
24
25 base = of_iomap(node, 0);
26 if (!base) {
27 pr_err("%pOFn: failed to map address\n", node);
28 return ERR_PTR(-EIO);
29 }
30
31 freq = readl(base);
32 iounmap(base);
33 of_property_read_string(node, "clock-output-names", &clk_name);
34
35 clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL, 0, freq);
36 if (IS_ERR(clk)) {
37 pr_err("%pOFn: failed to register fixed rate clock\n", node);
38 return clk;
39 }
40
41 ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, clk);
42 if (ret) {
43 pr_err("%pOFn: failed to add clock provider\n", node);
44 clk_hw_unregister(clk);
45 clk = ERR_PTR(ret);
46 }
47
48 return clk;
49}
50
51static void __init of_fixed_mmio_clk_setup(struct device_node *node)
52{
53 fixed_mmio_clk_setup(node);
54}
55CLK_OF_DECLARE(fixed_mmio_clk, "fixed-mmio-clock", of_fixed_mmio_clk_setup);
56
57/**
58 * This is not executed when of_fixed_mmio_clk_setup succeeded.
59 */
60static int of_fixed_mmio_clk_probe(struct platform_device *pdev)
61{
62 struct clk_hw *clk;
63
64 clk = fixed_mmio_clk_setup(pdev->dev.of_node);
65 if (IS_ERR(clk))
66 return PTR_ERR(clk);
67
68 platform_set_drvdata(pdev, clk);
69
70 return 0;
71}
72
73static int of_fixed_mmio_clk_remove(struct platform_device *pdev)
74{
75 struct clk_hw *clk = platform_get_drvdata(pdev);
76
77 of_clk_del_provider(pdev->dev.of_node);
78 clk_hw_unregister_fixed_rate(clk);
79
80 return 0;
81}
82
83static const struct of_device_id of_fixed_mmio_clk_ids[] = {
84 { .compatible = "fixed-mmio-clock" },
85 { }
86};
87MODULE_DEVICE_TABLE(of, of_fixed_mmio_clk_ids);
88
89static struct platform_driver of_fixed_mmio_clk_driver = {
90 .driver = {
91 .name = "of_fixed_mmio_clk",
92 .of_match_table = of_fixed_mmio_clk_ids,
93 },
94 .probe = of_fixed_mmio_clk_probe,
95 .remove = of_fixed_mmio_clk_remove,
96};
97module_platform_driver(of_fixed_mmio_clk_driver);
98
99MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
100MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
101MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 727ed8e1bb72..8e4581004695 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -293,6 +293,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
293 /* Map system registers */ 293 /* Map system registers */
294 srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs"); 294 srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
295 hb_clk->reg = of_iomap(srnp, 0); 295 hb_clk->reg = of_iomap(srnp, 0);
296 of_node_put(srnp);
296 BUG_ON(!hb_clk->reg); 297 BUG_ON(!hb_clk->reg);
297 hb_clk->reg += reg; 298 hb_clk->reg += reg;
298 299
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 22c937644c93..3727d5472450 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -235,8 +235,9 @@ static int max77686_clk_probe(struct platform_device *pdev)
235 return ret; 235 return ret;
236 } 236 }
237 237
238 ret = clk_hw_register_clkdev(&max_clk_data->hw, 238 ret = devm_clk_hw_register_clkdev(dev, &max_clk_data->hw,
239 max_clk_data->clk_idata.name, NULL); 239 max_clk_data->clk_idata.name,
240 NULL);
240 if (ret < 0) { 241 if (ret < 0) {
241 dev_err(dev, "Failed to clkdev register: %d\n", ret); 242 dev_err(dev, "Failed to clkdev register: %d\n", ret);
242 return ret; 243 return ret;
@@ -244,8 +245,8 @@ static int max77686_clk_probe(struct platform_device *pdev)
244 } 245 }
245 246
246 if (parent->of_node) { 247 if (parent->of_node) {
247 ret = of_clk_add_hw_provider(parent->of_node, of_clk_max77686_get, 248 ret = devm_of_clk_add_hw_provider(dev, of_clk_max77686_get,
248 drv_data); 249 drv_data);
249 250
250 if (ret < 0) { 251 if (ret < 0) {
251 dev_err(dev, "Failed to register OF clock provider: %d\n", 252 dev_err(dev, "Failed to register OF clock provider: %d\n",
@@ -261,27 +262,11 @@ static int max77686_clk_probe(struct platform_device *pdev)
261 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT); 262 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT);
262 if (ret < 0) { 263 if (ret < 0) {
263 dev_err(dev, "Failed to config low-jitter: %d\n", ret); 264 dev_err(dev, "Failed to config low-jitter: %d\n", ret);
264 goto remove_of_clk_provider; 265 return ret;
265 } 266 }
266 } 267 }
267 268
268 return 0; 269 return 0;
269
270remove_of_clk_provider:
271 if (parent->of_node)
272 of_clk_del_provider(parent->of_node);
273
274 return ret;
275}
276
277static int max77686_clk_remove(struct platform_device *pdev)
278{
279 struct device *parent = pdev->dev.parent;
280
281 if (parent->of_node)
282 of_clk_del_provider(parent->of_node);
283
284 return 0;
285} 270}
286 271
287static const struct platform_device_id max77686_clk_id[] = { 272static const struct platform_device_id max77686_clk_id[] = {
@@ -297,7 +282,6 @@ static struct platform_driver max77686_clk_driver = {
297 .name = "max77686-clk", 282 .name = "max77686-clk",
298 }, 283 },
299 .probe = max77686_clk_probe, 284 .probe = max77686_clk_probe,
300 .remove = max77686_clk_remove,
301 .id_table = max77686_clk_id, 285 .id_table = max77686_clk_id,
302}; 286};
303 287
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 5baa9e051110..0e84f6dfa54e 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1389,6 +1389,7 @@ static void __init clockgen_init(struct device_node *np)
1389 pr_err("%s: Couldn't map %pOF regs\n", __func__, 1389 pr_err("%s: Couldn't map %pOF regs\n", __func__,
1390 guts); 1390 guts);
1391 } 1391 }
1392 of_node_put(guts);
1392 } 1393 }
1393 1394
1394 } 1395 }
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index ea846f77750b..0cad5748bf0e 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
41 return pdmclk->enabled; 41 return pdmclk->enabled;
42} 42}
43 43
44static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
45 unsigned int reg)
46{
47 const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
48 int ret;
49
50 ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
51 if (ret < 0)
52 return ret;
53
54 ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
55 if (ret < 0)
56 return ret;
57
58 return 0;
59}
60
61/*
62 * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
63 * Cold Temperature". This affects cold boot and deeper idle states it
64 * seems. The workaround consists of resetting HPPLL and LPPLL.
65 */
66static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
67{
68 int ret;
69
70 ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
71 if (ret)
72 return ret;
73
74 ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
75 if (ret)
76 return ret;
77
78 return 0;
79}
80
44static int twl6040_pdmclk_prepare(struct clk_hw *hw) 81static int twl6040_pdmclk_prepare(struct clk_hw *hw)
45{ 82{
46 struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk, 83 struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
@@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw)
48 int ret; 85 int ret;
49 86
50 ret = twl6040_power(pdmclk->twl6040, 1); 87 ret = twl6040_power(pdmclk->twl6040, 1);
51 if (!ret) 88 if (ret)
52 pdmclk->enabled = 1; 89 return ret;
90
91 ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
92 if (ret)
93 goto out_err;
94
95 pdmclk->enabled = 1;
96
97 return 0;
98
99out_err:
100 dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
101 twl6040_power(pdmclk->twl6040, 0);
53 102
54 return ret; 103 return ret;
55} 104}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d2477a5058ac..af3882f04080 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -394,16 +394,19 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
394{ 394{
395 return clk_core_is_prepared(hw->core); 395 return clk_core_is_prepared(hw->core);
396} 396}
397EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
397 398
398bool clk_hw_rate_is_protected(const struct clk_hw *hw) 399bool clk_hw_rate_is_protected(const struct clk_hw *hw)
399{ 400{
400 return clk_core_rate_is_protected(hw->core); 401 return clk_core_rate_is_protected(hw->core);
401} 402}
403EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
402 404
403bool clk_hw_is_enabled(const struct clk_hw *hw) 405bool clk_hw_is_enabled(const struct clk_hw *hw)
404{ 406{
405 return clk_core_is_enabled(hw->core); 407 return clk_core_is_enabled(hw->core);
406} 408}
409EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
407 410
408bool __clk_is_enabled(struct clk *clk) 411bool __clk_is_enabled(struct clk *clk)
409{ 412{
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 9ab3db8b3988..4cfe39636105 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -52,6 +52,12 @@ struct clk *of_clk_get(struct device_node *np, int index)
52} 52}
53EXPORT_SYMBOL(of_clk_get); 53EXPORT_SYMBOL(of_clk_get);
54 54
55/*
56 * Beware the return values when np is valid, but no clock provider is found.
57 * If name == NULL, the function returns -ENOENT.
58 * If name != NULL, the function returns -EINVAL. This is because __of_clk_get()
59 * is called even if of_property_match_string() returns an error.
60 */
55static struct clk *__of_clk_get_by_name(struct device_node *np, 61static struct clk *__of_clk_get_by_name(struct device_node *np,
56 const char *dev_id, 62 const char *dev_id,
57 const char *name) 63 const char *name)
@@ -401,6 +407,23 @@ static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
401 return cl; 407 return cl;
402} 408}
403 409
410static int do_clk_register_clkdev(struct clk_hw *hw,
411 struct clk_lookup **cl, const char *con_id, const char *dev_id)
412{
413 if (IS_ERR(hw))
414 return PTR_ERR(hw);
415 /*
416 * Since dev_id can be NULL, and NULL is handled specially, we must
417 * pass it as either a NULL format string, or with "%s".
418 */
419 if (dev_id)
420 *cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
421 else
422 *cl = __clk_register_clkdev(hw, con_id, NULL);
423
424 return *cl ? 0 : -ENOMEM;
425}
426
404/** 427/**
405 * clk_register_clkdev - register one clock lookup for a struct clk 428 * clk_register_clkdev - register one clock lookup for a struct clk
406 * @clk: struct clk to associate with all clk_lookups 429 * @clk: struct clk to associate with all clk_lookups
@@ -423,17 +446,8 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
423 if (IS_ERR(clk)) 446 if (IS_ERR(clk))
424 return PTR_ERR(clk); 447 return PTR_ERR(clk);
425 448
426 /* 449 return do_clk_register_clkdev(__clk_get_hw(clk), &cl, con_id,
427 * Since dev_id can be NULL, and NULL is handled specially, we must 450 dev_id);
428 * pass it as either a NULL format string, or with "%s".
429 */
430 if (dev_id)
431 cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, "%s",
432 dev_id);
433 else
434 cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, NULL);
435
436 return cl ? 0 : -ENOMEM;
437} 451}
438EXPORT_SYMBOL(clk_register_clkdev); 452EXPORT_SYMBOL(clk_register_clkdev);
439 453
@@ -456,18 +470,75 @@ int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
456{ 470{
457 struct clk_lookup *cl; 471 struct clk_lookup *cl;
458 472
459 if (IS_ERR(hw)) 473 return do_clk_register_clkdev(hw, &cl, con_id, dev_id);
460 return PTR_ERR(hw); 474}
475EXPORT_SYMBOL(clk_hw_register_clkdev);
461 476
462 /* 477static void devm_clkdev_release(struct device *dev, void *res)
463 * Since dev_id can be NULL, and NULL is handled specially, we must 478{
464 * pass it as either a NULL format string, or with "%s". 479 clkdev_drop(*(struct clk_lookup **)res);
465 */ 480}
466 if (dev_id) 481
467 cl = __clk_register_clkdev(hw, con_id, "%s", dev_id); 482static int devm_clk_match_clkdev(struct device *dev, void *res, void *data)
468 else 483{
469 cl = __clk_register_clkdev(hw, con_id, NULL); 484 struct clk_lookup **l = res;
470 485
471 return cl ? 0 : -ENOMEM; 486 return *l == data;
472} 487}
473EXPORT_SYMBOL(clk_hw_register_clkdev); 488
489/**
490 * devm_clk_release_clkdev - Resource managed clkdev lookup release
491 * @dev: device this lookup is bound
492 * @con_id: connection ID string on device
493 * @dev_id: format string describing device name
494 *
495 * Drop the clkdev lookup created with devm_clk_hw_register_clkdev.
496 * Normally this function will not need to be called and the resource
497 * management code will ensure that the resource is freed.
498 */
499void devm_clk_release_clkdev(struct device *dev, const char *con_id,
500 const char *dev_id)
501{
502 struct clk_lookup *cl;
503 int rval;
504
505 cl = clk_find(dev_id, con_id);
506 WARN_ON(!cl);
507 rval = devres_release(dev, devm_clkdev_release,
508 devm_clk_match_clkdev, cl);
509 WARN_ON(rval);
510}
511EXPORT_SYMBOL(devm_clk_release_clkdev);
512
513/**
514 * devm_clk_hw_register_clkdev - managed clk lookup registration for clk_hw
515 * @dev: device this lookup is bound
516 * @hw: struct clk_hw to associate with all clk_lookups
517 * @con_id: connection ID string on device
518 * @dev_id: format string describing device name
519 *
520 * con_id or dev_id may be NULL as a wildcard, just as in the rest of
521 * clkdev.
522 *
523 * To make things easier for mass registration, we detect error clk_hws
524 * from a previous clk_hw_register_*() call, and return the error code for
525 * those. This is to permit this function to be called immediately
526 * after clk_hw_register_*().
527 */
528int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
529 const char *con_id, const char *dev_id)
530{
531 int rval = -ENOMEM;
532 struct clk_lookup **cl;
533
534 cl = devres_alloc(devm_clkdev_release, sizeof(*cl), GFP_KERNEL);
535 if (cl) {
536 rval = do_clk_register_clkdev(hw, cl, con_id, dev_id);
537 if (!rval)
538 devres_add(dev, cl);
539 else
540 devres_free(cl);
541 }
542 return rval;
543}
544EXPORT_SYMBOL(devm_clk_hw_register_clkdev);
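The new devm_clk_hw_register_clkdev() above lets a probe register a clkdev lookup that is dropped automatically on driver detach, which is what allows the max77686 driver earlier in this series to delete its remove() callback. A hedged usage sketch (kernel context assumed; the fixed-rate clock name and probe function are hypothetical):

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/platform_device.h>

static int foo_clk_probe(struct platform_device *pdev)
{
        struct clk_hw *hw;

        hw = clk_hw_register_fixed_rate(&pdev->dev, "foo-osc", NULL, 0, 32768);

        /* An error hw from the register call above is passed through and
         * reported by the helper, per its kernel-doc in the hunk above. */
        return devm_clk_hw_register_clkdev(&pdev->dev, hw, "foo-osc", NULL);
}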
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 716eac3136b4..708e7c5590dd 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -471,6 +471,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
471 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop"); 471 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
472 anatop_base = base = of_iomap(np, 0); 472 anatop_base = base = of_iomap(np, 0);
473 WARN_ON(!base); 473 WARN_ON(!base);
474 of_node_put(np);
474 475
475 /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */ 476 /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
476 if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) { 477 if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 18527a335ace..91558b09bf9e 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -151,6 +151,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
151 np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop"); 151 np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
152 base = of_iomap(np, 0); 152 base = of_iomap(np, 0);
153 WARN_ON(!base); 153 WARN_ON(!base);
154 of_node_put(np);
154 155
155 clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); 156 clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
156 clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); 157 clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 06c105d580a4..cfbd8d4edb85 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -404,6 +404,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
404 np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop"); 404 np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
405 base = of_iomap(np, 0); 405 base = of_iomap(np, 0);
406 WARN_ON(!base); 406 WARN_ON(!base);
407 of_node_put(np);
407 408
408 clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); 409 clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
409 clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); 410 clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 4e18f629f823..ce306631e844 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -48,8 +48,8 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
48 struct clk_hw **clks; 48 struct clk_hw **clks;
49 void __iomem *base; 49 void __iomem *base;
50 50
51 clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * 51 clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SCG1_END),
52 IMX7ULP_CLK_SCG1_END, GFP_KERNEL); 52 GFP_KERNEL);
53 if (!clk_data) 53 if (!clk_data)
54 return; 54 return;
55 55
@@ -136,8 +136,8 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
136 struct clk_hw **clks; 136 struct clk_hw **clks;
137 void __iomem *base; 137 void __iomem *base;
138 138
139 clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * 139 clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
140 IMX7ULP_CLK_PCC2_END, GFP_KERNEL); 140 GFP_KERNEL);
141 if (!clk_data) 141 if (!clk_data)
142 return; 142 return;
143 143
@@ -183,8 +183,8 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
183 struct clk_hw **clks; 183 struct clk_hw **clks;
184 void __iomem *base; 184 void __iomem *base;
185 185
186 clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * 186 clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
187 IMX7ULP_CLK_PCC3_END, GFP_KERNEL); 187 GFP_KERNEL);
188 if (!clk_data) 188 if (!clk_data)
189 return; 189 return;
190 190
@@ -228,8 +228,8 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
228 struct clk_hw **clks; 228 struct clk_hw **clks;
229 void __iomem *base; 229 void __iomem *base;
230 230
231 clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * 231 clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SMC1_END),
232 IMX7ULP_CLK_SMC1_END, GFP_KERNEL); 232 GFP_KERNEL);
233 if (!clk_data) 233 if (!clk_data)
234 return; 234 return;
235 235
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 6dae54325a91..a334667c450a 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -203,6 +203,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
203 np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop"); 203 np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
204 anatop_base = of_iomap(np, 0); 204 anatop_base = of_iomap(np, 0);
205 BUG_ON(!anatop_base); 205 BUG_ON(!anatop_base);
206 of_node_put(np);
206 207
207 np = ccm_node; 208 np = ccm_node;
208 ccm_base = of_iomap(np, 0); 209 ccm_base = of_iomap(np, 0);
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index efaa70f682b4..3858747f5438 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -1,27 +1,52 @@
1config COMMON_CLK_AMLOGIC 1config COMMON_CLK_MESON_INPUT
2 bool 2 tristate
3 depends on ARCH_MESON || COMPILE_TEST
4 select COMMON_CLK_REGMAP_MESON
5 3
6config COMMON_CLK_AMLOGIC_AUDIO 4config COMMON_CLK_MESON_REGMAP
7 bool 5 tristate
8 depends on ARCH_MESON || COMPILE_TEST 6 select REGMAP
9 select COMMON_CLK_AMLOGIC
10 7
11config COMMON_CLK_MESON_AO 8config COMMON_CLK_MESON_DUALDIV
12 bool 9 tristate
13 depends on OF 10 select COMMON_CLK_MESON_REGMAP
14 depends on ARCH_MESON || COMPILE_TEST 11
15 select COMMON_CLK_REGMAP_MESON 12config COMMON_CLK_MESON_MPLL
13 tristate
14 select COMMON_CLK_MESON_REGMAP
15
16config COMMON_CLK_MESON_PHASE
17 tristate
18 select COMMON_CLK_MESON_REGMAP
19
20config COMMON_CLK_MESON_PLL
21 tristate
22 select COMMON_CLK_MESON_REGMAP
23
24config COMMON_CLK_MESON_SCLK_DIV
25 tristate
26 select COMMON_CLK_MESON_REGMAP
27
28config COMMON_CLK_MESON_VID_PLL_DIV
29 tristate
30 select COMMON_CLK_MESON_REGMAP
31
32config COMMON_CLK_MESON_AO_CLKC
33 tristate
34 select COMMON_CLK_MESON_REGMAP
35 select COMMON_CLK_MESON_INPUT
16 select RESET_CONTROLLER 36 select RESET_CONTROLLER
17 37
18config COMMON_CLK_REGMAP_MESON 38config COMMON_CLK_MESON_EE_CLKC
19 bool 39 tristate
20 select REGMAP 40 select COMMON_CLK_MESON_REGMAP
41 select COMMON_CLK_MESON_INPUT
21 42
22config COMMON_CLK_MESON8B 43config COMMON_CLK_MESON8B
23 bool 44 bool
24 select COMMON_CLK_AMLOGIC 45 depends on ARCH_MESON
46 select COMMON_CLK_MESON_REGMAP
47 select COMMON_CLK_MESON_MPLL
48 select COMMON_CLK_MESON_PLL
49 select MFD_SYSCON
25 select RESET_CONTROLLER 50 select RESET_CONTROLLER
26 help 51 help
27 Support for the clock controller on AmLogic S802 (Meson8), 52 Support for the clock controller on AmLogic S802 (Meson8),
@@ -30,8 +55,14 @@ config COMMON_CLK_MESON8B
30 55
31config COMMON_CLK_GXBB 56config COMMON_CLK_GXBB
32 bool 57 bool
33 select COMMON_CLK_AMLOGIC 58 depends on ARCH_MESON
34 select COMMON_CLK_MESON_AO 59 select COMMON_CLK_MESON_REGMAP
60 select COMMON_CLK_MESON_DUALDIV
61 select COMMON_CLK_MESON_VID_PLL_DIV
62 select COMMON_CLK_MESON_MPLL
63 select COMMON_CLK_MESON_PLL
64 select COMMON_CLK_MESON_AO_CLKC
65 select COMMON_CLK_MESON_EE_CLKC
35 select MFD_SYSCON 66 select MFD_SYSCON
36 help 67 help
37 Support for the clock controller on AmLogic S905 devices, aka gxbb. 68 Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -39,8 +70,13 @@ config COMMON_CLK_GXBB
39 70
40config COMMON_CLK_AXG 71config COMMON_CLK_AXG
41 bool 72 bool
42 select COMMON_CLK_AMLOGIC 73 depends on ARCH_MESON
43 select COMMON_CLK_MESON_AO 74 select COMMON_CLK_MESON_REGMAP
75 select COMMON_CLK_MESON_DUALDIV
76 select COMMON_CLK_MESON_MPLL
77 select COMMON_CLK_MESON_PLL
78 select COMMON_CLK_MESON_AO_CLKC
79 select COMMON_CLK_MESON_EE_CLKC
44 select MFD_SYSCON 80 select MFD_SYSCON
45 help 81 help
46 Support for the clock controller on AmLogic A113D devices, aka axg. 82 Support for the clock controller on AmLogic A113D devices, aka axg.
@@ -48,9 +84,26 @@ config COMMON_CLK_AXG
48 84
49config COMMON_CLK_AXG_AUDIO 85config COMMON_CLK_AXG_AUDIO
50 tristate "Meson AXG Audio Clock Controller Driver" 86 tristate "Meson AXG Audio Clock Controller Driver"
51 depends on COMMON_CLK_AXG 87 depends on ARCH_MESON
52 select COMMON_CLK_AMLOGIC_AUDIO 88 select COMMON_CLK_MESON_INPUT
53 select MFD_SYSCON 89 select COMMON_CLK_MESON_REGMAP
90 select COMMON_CLK_MESON_PHASE
91 select COMMON_CLK_MESON_SCLK_DIV
92 select REGMAP_MMIO
54 help 93 help
55 Support for the audio clock controller on AmLogic A113D devices, 94 Support for the audio clock controller on AmLogic A113D devices,
56 aka axg, Say Y if you want audio subsystem to work. 95 aka axg, Say Y if you want audio subsystem to work.
96
97config COMMON_CLK_G12A
98 bool
99 depends on ARCH_MESON
100 select COMMON_CLK_MESON_REGMAP
101 select COMMON_CLK_MESON_DUALDIV
102 select COMMON_CLK_MESON_MPLL
103 select COMMON_CLK_MESON_PLL
104 select COMMON_CLK_MESON_AO_CLKC
105 select COMMON_CLK_MESON_EE_CLKC
106 select MFD_SYSCON
107 help
108 Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
109 devices, aka g12a. Say Y if you want peripherals to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index a849aa809825..021fc290e749 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -1,13 +1,20 @@
1# 1# Amlogic clock drivers
2# Makefile for Meson specific clk
3#
4 2
5obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o vid-pll-div.o 3obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o
6obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-input.o 4obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o
7obj-$(CONFIG_COMMON_CLK_AMLOGIC_AUDIO) += clk-triphase.o sclk-div.o 5obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
8obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o 6obj-$(CONFIG_COMMON_CLK_MESON_INPUT) += clk-input.o
7obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
8obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
9obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
10obj-$(CONFIG_COMMON_CLK_MESON_REGMAP) += clk-regmap.o
11obj-$(CONFIG_COMMON_CLK_MESON_SCLK_DIV) += sclk-div.o
12obj-$(CONFIG_COMMON_CLK_MESON_VID_PLL_DIV) += vid-pll-div.o
13
14# Amlogic Clock controllers
15
16obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
17obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
18obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
19obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o
9obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o 20obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
10obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
11obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
12obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
13obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index 29e088542387..0086f31288eb 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -12,10 +12,27 @@
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/reset-controller.h> 13#include <linux/reset-controller.h>
14#include <linux/mfd/syscon.h> 14#include <linux/mfd/syscon.h>
15#include "clk-regmap.h"
16#include "meson-aoclk.h" 15#include "meson-aoclk.h"
17#include "axg-aoclk.h" 16#include "axg-aoclk.h"
18 17
18#include "clk-regmap.h"
19#include "clk-dualdiv.h"
20
21#define IN_PREFIX "ao-in-"
22
23/*
24 * AO Configuration Clock registers offsets
25 * Register offsets from the data sheet must be multiplied by 4.
26 */
27#define AO_RTI_PWR_CNTL_REG1 0x0C
28#define AO_RTI_PWR_CNTL_REG0 0x10
29#define AO_RTI_GEN_CNTL_REG0 0x40
30#define AO_OSCIN_CNTL 0x58
31#define AO_CRT_CLK_CNTL1 0x68
32#define AO_SAR_CLK 0x90
33#define AO_RTC_ALT_CLK_CNTL0 0x94
34#define AO_RTC_ALT_CLK_CNTL1 0x98
35
19#define AXG_AO_GATE(_name, _bit) \ 36#define AXG_AO_GATE(_name, _bit) \
20static struct clk_regmap axg_aoclk_##_name = { \ 37static struct clk_regmap axg_aoclk_##_name = { \
21 .data = &(struct clk_regmap_gate_data) { \ 38 .data = &(struct clk_regmap_gate_data) { \
@@ -25,7 +42,7 @@ static struct clk_regmap axg_aoclk_##_name = { \
25 .hw.init = &(struct clk_init_data) { \ 42 .hw.init = &(struct clk_init_data) { \
26 .name = "axg_ao_" #_name, \ 43 .name = "axg_ao_" #_name, \
27 .ops = &clk_regmap_gate_ops, \ 44 .ops = &clk_regmap_gate_ops, \
28 .parent_names = (const char *[]){ "clk81" }, \ 45 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
29 .num_parents = 1, \ 46 .num_parents = 1, \
30 .flags = CLK_IGNORE_UNUSED, \ 47 .flags = CLK_IGNORE_UNUSED, \
31 }, \ 48 }, \
@@ -39,17 +56,141 @@ AXG_AO_GATE(uart2, 5);
39AXG_AO_GATE(ir_blaster, 6); 56AXG_AO_GATE(ir_blaster, 6);
40AXG_AO_GATE(saradc, 7); 57AXG_AO_GATE(saradc, 7);
41 58
59static struct clk_regmap axg_aoclk_cts_oscin = {
60 .data = &(struct clk_regmap_gate_data){
61 .offset = AO_RTI_PWR_CNTL_REG0,
62 .bit_idx = 14,
63 },
64 .hw.init = &(struct clk_init_data){
65 .name = "cts_oscin",
66 .ops = &clk_regmap_gate_ro_ops,
67 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
68 .num_parents = 1,
69 },
70};
71
72static struct clk_regmap axg_aoclk_32k_pre = {
73 .data = &(struct clk_regmap_gate_data){
74 .offset = AO_RTC_ALT_CLK_CNTL0,
75 .bit_idx = 31,
76 },
77 .hw.init = &(struct clk_init_data){
78 .name = "axg_ao_32k_pre",
79 .ops = &clk_regmap_gate_ops,
80 .parent_names = (const char *[]){ "cts_oscin" },
81 .num_parents = 1,
82 },
83};
84
85static const struct meson_clk_dualdiv_param axg_32k_div_table[] = {
86 {
87 .dual = 1,
88 .n1 = 733,
89 .m1 = 8,
90 .n2 = 732,
91 .m2 = 11,
92 }, {}
93};
94
95static struct clk_regmap axg_aoclk_32k_div = {
96 .data = &(struct meson_clk_dualdiv_data){
97 .n1 = {
98 .reg_off = AO_RTC_ALT_CLK_CNTL0,
99 .shift = 0,
100 .width = 12,
101 },
102 .n2 = {
103 .reg_off = AO_RTC_ALT_CLK_CNTL0,
104 .shift = 12,
105 .width = 12,
106 },
107 .m1 = {
108 .reg_off = AO_RTC_ALT_CLK_CNTL1,
109 .shift = 0,
110 .width = 12,
111 },
112 .m2 = {
113 .reg_off = AO_RTC_ALT_CLK_CNTL1,
114 .shift = 12,
115 .width = 12,
116 },
117 .dual = {
118 .reg_off = AO_RTC_ALT_CLK_CNTL0,
119 .shift = 28,
120 .width = 1,
121 },
122 .table = axg_32k_div_table,
123 },
124 .hw.init = &(struct clk_init_data){
125 .name = "axg_ao_32k_div",
126 .ops = &meson_clk_dualdiv_ops,
127 .parent_names = (const char *[]){ "axg_ao_32k_pre" },
128 .num_parents = 1,
129 },
130};
131
132static struct clk_regmap axg_aoclk_32k_sel = {
133 .data = &(struct clk_regmap_mux_data) {
134 .offset = AO_RTC_ALT_CLK_CNTL1,
135 .mask = 0x1,
136 .shift = 24,
137 .flags = CLK_MUX_ROUND_CLOSEST,
138 },
139 .hw.init = &(struct clk_init_data){
140 .name = "axg_ao_32k_sel",
141 .ops = &clk_regmap_mux_ops,
142 .parent_names = (const char *[]){ "axg_ao_32k_div",
143 "axg_ao_32k_pre" },
144 .num_parents = 2,
145 .flags = CLK_SET_RATE_PARENT,
146 },
147};
148
149static struct clk_regmap axg_aoclk_32k = {
150 .data = &(struct clk_regmap_gate_data){
151 .offset = AO_RTC_ALT_CLK_CNTL0,
152 .bit_idx = 30,
153 },
154 .hw.init = &(struct clk_init_data){
155 .name = "axg_ao_32k",
156 .ops = &clk_regmap_gate_ops,
157 .parent_names = (const char *[]){ "axg_ao_32k_sel" },
158 .num_parents = 1,
159 .flags = CLK_SET_RATE_PARENT,
160 },
161};
162
163static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
164 .data = &(struct clk_regmap_mux_data) {
165 .offset = AO_RTI_PWR_CNTL_REG0,
166 .mask = 0x1,
167 .shift = 10,
168 .flags = CLK_MUX_ROUND_CLOSEST,
169 },
170 .hw.init = &(struct clk_init_data){
171 .name = "axg_ao_cts_rtc_oscin",
172 .ops = &clk_regmap_mux_ops,
173 .parent_names = (const char *[]){ "axg_ao_32k",
174 IN_PREFIX "ext_32k-0" },
175 .num_parents = 2,
176 .flags = CLK_SET_RATE_PARENT,
177 },
178};
179
42static struct clk_regmap axg_aoclk_clk81 = { 180static struct clk_regmap axg_aoclk_clk81 = {
43 .data = &(struct clk_regmap_mux_data) { 181 .data = &(struct clk_regmap_mux_data) {
44 .offset = AO_RTI_PWR_CNTL_REG0, 182 .offset = AO_RTI_PWR_CNTL_REG0,
45 .mask = 0x1, 183 .mask = 0x1,
46 .shift = 8, 184 .shift = 8,
185 .flags = CLK_MUX_ROUND_CLOSEST,
47 }, 186 },
48 .hw.init = &(struct clk_init_data){ 187 .hw.init = &(struct clk_init_data){
49 .name = "axg_ao_clk81", 188 .name = "axg_ao_clk81",
50 .ops = &clk_regmap_mux_ro_ops, 189 .ops = &clk_regmap_mux_ro_ops,
51 .parent_names = (const char *[]){ "clk81", "ao_alt_xtal"}, 190 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
191 "axg_ao_cts_rtc_oscin"},
52 .num_parents = 2, 192 .num_parents = 2,
193 .flags = CLK_SET_RATE_PARENT,
53 }, 194 },
54}; 195};
55 196
@@ -62,7 +203,8 @@ static struct clk_regmap axg_aoclk_saradc_mux = {
62 .hw.init = &(struct clk_init_data){ 203 .hw.init = &(struct clk_init_data){
63 .name = "axg_ao_saradc_mux", 204 .name = "axg_ao_saradc_mux",
64 .ops = &clk_regmap_mux_ops, 205 .ops = &clk_regmap_mux_ops,
65 .parent_names = (const char *[]){ "xtal", "axg_ao_clk81" }, 206 .parent_names = (const char *[]){ IN_PREFIX "xtal",
207 "axg_ao_clk81" },
66 .num_parents = 2, 208 .num_parents = 2,
67 }, 209 },
68}; 210};
@@ -106,17 +248,23 @@ static const unsigned int axg_aoclk_reset[] = {
106}; 248};
107 249
108static struct clk_regmap *axg_aoclk_regmap[] = { 250static struct clk_regmap *axg_aoclk_regmap[] = {
109 [CLKID_AO_REMOTE] = &axg_aoclk_remote, 251 &axg_aoclk_remote,
110 [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master, 252 &axg_aoclk_i2c_master,
111 [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave, 253 &axg_aoclk_i2c_slave,
112 [CLKID_AO_UART1] = &axg_aoclk_uart1, 254 &axg_aoclk_uart1,
113 [CLKID_AO_UART2] = &axg_aoclk_uart2, 255 &axg_aoclk_uart2,
114 [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster, 256 &axg_aoclk_ir_blaster,
115 [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc, 257 &axg_aoclk_saradc,
116 [CLKID_AO_CLK81] = &axg_aoclk_clk81, 258 &axg_aoclk_cts_oscin,
117 [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux, 259 &axg_aoclk_32k_pre,
118 [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div, 260 &axg_aoclk_32k_div,
119 [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate, 261 &axg_aoclk_32k_sel,
262 &axg_aoclk_32k,
263 &axg_aoclk_cts_rtc_oscin,
264 &axg_aoclk_clk81,
265 &axg_aoclk_saradc_mux,
266 &axg_aoclk_saradc_div,
267 &axg_aoclk_saradc_gate,
120}; 268};
121 269
122static const struct clk_hw_onecell_data axg_aoclk_onecell_data = { 270static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
@@ -132,10 +280,22 @@ static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
132 [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw, 280 [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw,
133 [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw, 281 [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw,
134 [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw, 282 [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw,
283 [CLKID_AO_CTS_OSCIN] = &axg_aoclk_cts_oscin.hw,
284 [CLKID_AO_32K_PRE] = &axg_aoclk_32k_pre.hw,
285 [CLKID_AO_32K_DIV] = &axg_aoclk_32k_div.hw,
286 [CLKID_AO_32K_SEL] = &axg_aoclk_32k_sel.hw,
287 [CLKID_AO_32K] = &axg_aoclk_32k.hw,
288 [CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw,
135 }, 289 },
136 .num = NR_CLKS, 290 .num = NR_CLKS,
137}; 291};
138 292
293static const struct meson_aoclk_input axg_aoclk_inputs[] = {
294 { .name = "xtal", .required = true },
295 { .name = "mpeg-clk", .required = true },
296 { .name = "ext-32k-0", .required = false },
297};
298
139static const struct meson_aoclk_data axg_aoclkc_data = { 299static const struct meson_aoclk_data axg_aoclkc_data = {
140 .reset_reg = AO_RTI_GEN_CNTL_REG0, 300 .reset_reg = AO_RTI_GEN_CNTL_REG0,
141 .num_reset = ARRAY_SIZE(axg_aoclk_reset), 301 .num_reset = ARRAY_SIZE(axg_aoclk_reset),
@@ -143,6 +303,9 @@ static const struct meson_aoclk_data axg_aoclkc_data = {
143 .num_clks = ARRAY_SIZE(axg_aoclk_regmap), 303 .num_clks = ARRAY_SIZE(axg_aoclk_regmap),
144 .clks = axg_aoclk_regmap, 304 .clks = axg_aoclk_regmap,
145 .hw_data = &axg_aoclk_onecell_data, 305 .hw_data = &axg_aoclk_onecell_data,
306 .inputs = axg_aoclk_inputs,
307 .num_inputs = ARRAY_SIZE(axg_aoclk_inputs),
308 .input_prefix = IN_PREFIX,
146}; 309};
147 310
148static const struct of_device_id axg_aoclkc_match_table[] = { 311static const struct of_device_id axg_aoclkc_match_table[] = {
diff --git a/drivers/clk/meson/axg-aoclk.h b/drivers/clk/meson/axg-aoclk.h
index 91384d8dd844..3cc27e85170f 100644
--- a/drivers/clk/meson/axg-aoclk.h
+++ b/drivers/clk/meson/axg-aoclk.h
@@ -10,18 +10,7 @@
10#ifndef __AXG_AOCLKC_H 10#ifndef __AXG_AOCLKC_H
11#define __AXG_AOCLKC_H 11#define __AXG_AOCLKC_H
12 12
13#define NR_CLKS 11 13#define NR_CLKS 17
14/* AO Configuration Clock registers offsets
15 * Register offsets from the data sheet must be multiplied by 4.
16 */
17#define AO_RTI_PWR_CNTL_REG1 0x0C
18#define AO_RTI_PWR_CNTL_REG0 0x10
19#define AO_RTI_GEN_CNTL_REG0 0x40
20#define AO_OSCIN_CNTL 0x58
21#define AO_CRT_CLK_CNTL1 0x68
22#define AO_SAR_CLK 0x90
23#define AO_RTC_ALT_CLK_CNTL0 0x94
24#define AO_RTC_ALT_CLK_CNTL1 0x98
25 14
26#include <dt-bindings/clock/axg-aoclkc.h> 15#include <dt-bindings/clock/axg-aoclkc.h>
27#include <dt-bindings/reset/axg-aoclkc.h> 16#include <dt-bindings/reset/axg-aoclkc.h>
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 8ac3a2295473..7ab200b6c3bf 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -14,8 +14,11 @@
14#include <linux/reset.h> 14#include <linux/reset.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16 16
17#include "clkc-audio.h"
18#include "axg-audio.h" 17#include "axg-audio.h"
18#include "clk-input.h"
19#include "clk-regmap.h"
20#include "clk-phase.h"
21#include "sclk-div.h"
19 22
20#define AXG_MST_IN_COUNT 8 23#define AXG_MST_IN_COUNT 8
21#define AXG_SLV_SCLK_COUNT 10 24#define AXG_SLV_SCLK_COUNT 10
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 792735d7e46e..7a8ef80e5f2c 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -9,16 +9,17 @@
9 * Author: Qiufang Dai <qiufang.dai@amlogic.com> 9 * Author: Qiufang Dai <qiufang.dai@amlogic.com>
10 */ 10 */
11 11
12#include <linux/clk.h>
13#include <linux/clk-provider.h> 12#include <linux/clk-provider.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <linux/of_device.h> 14#include <linux/of_device.h>
16#include <linux/mfd/syscon.h>
17#include <linux/platform_device.h> 15#include <linux/platform_device.h>
18#include <linux/regmap.h>
19 16
20#include "clkc.h" 17#include "clk-input.h"
18#include "clk-regmap.h"
19#include "clk-pll.h"
20#include "clk-mpll.h"
21#include "axg.h" 21#include "axg.h"
22#include "meson-eeclk.h"
22 23
23static DEFINE_SPINLOCK(meson_clk_lock); 24static DEFINE_SPINLOCK(meson_clk_lock);
24 25
@@ -58,7 +59,7 @@ static struct clk_regmap axg_fixed_pll_dco = {
58 .hw.init = &(struct clk_init_data){ 59 .hw.init = &(struct clk_init_data){
59 .name = "fixed_pll_dco", 60 .name = "fixed_pll_dco",
60 .ops = &meson_clk_pll_ro_ops, 61 .ops = &meson_clk_pll_ro_ops,
61 .parent_names = (const char *[]){ "xtal" }, 62 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
62 .num_parents = 1, 63 .num_parents = 1,
63 }, 64 },
64}; 65};
@@ -113,7 +114,7 @@ static struct clk_regmap axg_sys_pll_dco = {
113 .hw.init = &(struct clk_init_data){ 114 .hw.init = &(struct clk_init_data){
114 .name = "sys_pll_dco", 115 .name = "sys_pll_dco",
115 .ops = &meson_clk_pll_ro_ops, 116 .ops = &meson_clk_pll_ro_ops,
116 .parent_names = (const char *[]){ "xtal" }, 117 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
117 .num_parents = 1, 118 .num_parents = 1,
118 }, 119 },
119}; 120};
@@ -214,7 +215,7 @@ static struct clk_regmap axg_gp0_pll_dco = {
214 .hw.init = &(struct clk_init_data){ 215 .hw.init = &(struct clk_init_data){
215 .name = "gp0_pll_dco", 216 .name = "gp0_pll_dco",
216 .ops = &meson_clk_pll_ops, 217 .ops = &meson_clk_pll_ops,
217 .parent_names = (const char *[]){ "xtal" }, 218 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
218 .num_parents = 1, 219 .num_parents = 1,
219 }, 220 },
220}; 221};
@@ -283,7 +284,7 @@ static struct clk_regmap axg_hifi_pll_dco = {
283 .hw.init = &(struct clk_init_data){ 284 .hw.init = &(struct clk_init_data){
284 .name = "hifi_pll_dco", 285 .name = "hifi_pll_dco",
285 .ops = &meson_clk_pll_ops, 286 .ops = &meson_clk_pll_ops,
286 .parent_names = (const char *[]){ "xtal" }, 287 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
287 .num_parents = 1, 288 .num_parents = 1,
288 }, 289 },
289}; 290};
@@ -701,7 +702,7 @@ static struct clk_regmap axg_pcie_pll_dco = {
701 .hw.init = &(struct clk_init_data){ 702 .hw.init = &(struct clk_init_data){
702 .name = "pcie_pll_dco", 703 .name = "pcie_pll_dco",
703 .ops = &meson_clk_pll_ops, 704 .ops = &meson_clk_pll_ops,
704 .parent_names = (const char *[]){ "xtal" }, 705 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
705 .num_parents = 1, 706 .num_parents = 1,
706 }, 707 },
707}; 708};
@@ -803,7 +804,7 @@ static struct clk_regmap axg_pcie_cml_en1 = {
803 804
804static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 }; 805static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
805static const char * const clk81_parent_names[] = { 806static const char * const clk81_parent_names[] = {
806 "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4", 807 IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
807 "fclk_div3", "fclk_div5" 808 "fclk_div3", "fclk_div5"
808}; 809};
809 810
@@ -852,7 +853,7 @@ static struct clk_regmap axg_clk81 = {
852}; 853};
853 854
854static const char * const axg_sd_emmc_clk0_parent_names[] = { 855static const char * const axg_sd_emmc_clk0_parent_names[] = {
855 "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", 856 IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
856 857
857 /* 858 /*
858 * Following these parent clocks, we should also have had mpll2, mpll3 859 * Following these parent clocks, we should also have had mpll2, mpll3
@@ -957,7 +958,7 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
957static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8, 958static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
958 9, 10, 11, 13, 14, }; 959 9, 10, 11, 13, 14, };
959static const char * const gen_clk_parent_names[] = { 960static const char * const gen_clk_parent_names[] = {
960 "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3", 961 IN_PREFIX "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
961 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll", 962 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
962}; 963};
963 964
@@ -1255,46 +1256,20 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
1255 &axg_pcie_pll_od, 1256 &axg_pcie_pll_od,
1256}; 1257};
1257 1258
1259static const struct meson_eeclkc_data axg_clkc_data = {
1260 .regmap_clks = axg_clk_regmaps,
1261 .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
1262 .hw_onecell_data = &axg_hw_onecell_data,
1263};
1264
1265
1258static const struct of_device_id clkc_match_table[] = { 1266static const struct of_device_id clkc_match_table[] = {
1259 { .compatible = "amlogic,axg-clkc" }, 1267 { .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
1260 {} 1268 {}
1261}; 1269};
1262 1270
1263static int axg_clkc_probe(struct platform_device *pdev)
1264{
1265 struct device *dev = &pdev->dev;
1266 struct regmap *map;
1267 int ret, i;
1268
1269 /* Get the hhi system controller node if available */
1270 map = syscon_node_to_regmap(of_get_parent(dev->of_node));
1271 if (IS_ERR(map)) {
1272 dev_err(dev, "failed to get HHI regmap\n");
1273 return PTR_ERR(map);
1274 }
1275
1276 /* Populate regmap for the regmap backed clocks */
1277 for (i = 0; i < ARRAY_SIZE(axg_clk_regmaps); i++)
1278 axg_clk_regmaps[i]->map = map;
1279
1280 for (i = 0; i < axg_hw_onecell_data.num; i++) {
1281 /* array might be sparse */
1282 if (!axg_hw_onecell_data.hws[i])
1283 continue;
1284
1285 ret = devm_clk_hw_register(dev, axg_hw_onecell_data.hws[i]);
1286 if (ret) {
1287 dev_err(dev, "Clock registration failed\n");
1288 return ret;
1289 }
1290 }
1291
1292 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
1293 &axg_hw_onecell_data);
1294}
1295
1296static struct platform_driver axg_driver = { 1271static struct platform_driver axg_driver = {
1297 .probe = axg_clkc_probe, 1272 .probe = meson_eeclkc_probe,
1298 .driver = { 1273 .driver = {
1299 .name = "axg-clkc", 1274 .name = "axg-clkc",
1300 .of_match_table = clkc_match_table, 1275 .of_match_table = clkc_match_table,
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
new file mode 100644
index 000000000000..c5ca23a5e3e8
--- /dev/null
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -0,0 +1,138 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2017 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 * Author: Jerome Brunet <jbrunet@baylibre.com>
6 */
7
8/*
9 * The AO Domain embeds a dual divider to generate a more precise
10 * 32.768 kHz clock for low-power suspend mode and CEC.
11 * ______ ______
12 * | | | |
13 * | Div1 |-| Cnt1 |
14 * /|______| |______|\
15 * -| ______ ______ X--> Out
16 * \| | | |/
17 * | Div2 |-| Cnt2 |
18 * |______| |______|
19 *
20 * The divider can be switched between single and dual mode; a counter
21 * for each divider sets when the switch between the two is done.
22 */
23
24#include <linux/clk-provider.h>
25#include <linux/module.h>
26
27#include "clk-regmap.h"
28#include "clk-dualdiv.h"
29
30static inline struct meson_clk_dualdiv_data *
31meson_clk_dualdiv_data(struct clk_regmap *clk)
32{
33 return (struct meson_clk_dualdiv_data *)clk->data;
34}
35
36static unsigned long
37__dualdiv_param_to_rate(unsigned long parent_rate,
38 const struct meson_clk_dualdiv_param *p)
39{
40 if (!p->dual)
41 return DIV_ROUND_CLOSEST(parent_rate, p->n1);
42
43 return DIV_ROUND_CLOSEST(parent_rate * (p->m1 + p->m2),
44 p->n1 * p->m1 + p->n2 * p->m2);
45}
46
47static unsigned long meson_clk_dualdiv_recalc_rate(struct clk_hw *hw,
48 unsigned long parent_rate)
49{
50 struct clk_regmap *clk = to_clk_regmap(hw);
51 struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
52 struct meson_clk_dualdiv_param setting;
53
54 setting.dual = meson_parm_read(clk->map, &dualdiv->dual);
55 setting.n1 = meson_parm_read(clk->map, &dualdiv->n1) + 1;
56 setting.m1 = meson_parm_read(clk->map, &dualdiv->m1) + 1;
57 setting.n2 = meson_parm_read(clk->map, &dualdiv->n2) + 1;
58 setting.m2 = meson_parm_read(clk->map, &dualdiv->m2) + 1;
59
60 return __dualdiv_param_to_rate(parent_rate, &setting);
61}
62
63static const struct meson_clk_dualdiv_param *
64__dualdiv_get_setting(unsigned long rate, unsigned long parent_rate,
65 struct meson_clk_dualdiv_data *dualdiv)
66{
67 const struct meson_clk_dualdiv_param *table = dualdiv->table;
68 unsigned long best = 0, now = 0;
69 unsigned int i, best_i = 0;
70
71 if (!table)
72 return NULL;
73
74 for (i = 0; table[i].n1; i++) {
75 now = __dualdiv_param_to_rate(parent_rate, &table[i]);
76
77 /* If we get an exact match, don't bother any further */
78 if (now == rate) {
79 return &table[i];
80 } else if (abs(now - rate) < abs(best - rate)) {
81 best = now;
82 best_i = i;
83 }
84 }
85
86 return (struct meson_clk_dualdiv_param *)&table[best_i];
87}
88
89static long meson_clk_dualdiv_round_rate(struct clk_hw *hw, unsigned long rate,
90 unsigned long *parent_rate)
91{
92 struct clk_regmap *clk = to_clk_regmap(hw);
93 struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
94 const struct meson_clk_dualdiv_param *setting =
95 __dualdiv_get_setting(rate, *parent_rate, dualdiv);
96
97 if (!setting)
98 return meson_clk_dualdiv_recalc_rate(hw, *parent_rate);
99
100 return __dualdiv_param_to_rate(*parent_rate, setting);
101}
102
103static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate,
104 unsigned long parent_rate)
105{
106 struct clk_regmap *clk = to_clk_regmap(hw);
107 struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
108 const struct meson_clk_dualdiv_param *setting =
109 __dualdiv_get_setting(rate, parent_rate, dualdiv);
110
111 if (!setting)
112 return -EINVAL;
113
114 meson_parm_write(clk->map, &dualdiv->dual, setting->dual);
115 meson_parm_write(clk->map, &dualdiv->n1, setting->n1 - 1);
116 meson_parm_write(clk->map, &dualdiv->m1, setting->m1 - 1);
117 meson_parm_write(clk->map, &dualdiv->n2, setting->n2 - 1);
118 meson_parm_write(clk->map, &dualdiv->m2, setting->m2 - 1);
119
120 return 0;
121}
122
123const struct clk_ops meson_clk_dualdiv_ops = {
124 .recalc_rate = meson_clk_dualdiv_recalc_rate,
125 .round_rate = meson_clk_dualdiv_round_rate,
126 .set_rate = meson_clk_dualdiv_set_rate,
127};
128EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ops);
129
130const struct clk_ops meson_clk_dualdiv_ro_ops = {
131 .recalc_rate = meson_clk_dualdiv_recalc_rate,
132};
133EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops);
134
135MODULE_DESCRIPTION("Amlogic dual divider driver");
136MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
137MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
138MODULE_LICENSE("GPL v2");
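
To make the divider maths above concrete, here is a small user-space sketch of the same arithmetic performed by __dualdiv_param_to_rate(), checked against the 733/8 + 732/11 entry of the 32k tables; the 24 MHz oscillator rate is an assumed input, not something stated in this diff.

#include <stdio.h>

/* Stand-alone rework of __dualdiv_param_to_rate(): in dual mode the two
 * divider/counter pairs time-share the input, giving
 * parent * (m1 + m2) / (n1 * m1 + n2 * m2), rounded to closest. */
static unsigned long dualdiv_rate(unsigned long parent,
				  unsigned long n1, unsigned long m1,
				  unsigned long n2, unsigned long m2)
{
	unsigned long den = n1 * m1 + n2 * m2;

	return (parent * (m1 + m2) + den / 2) / den;
}

int main(void)
{
	/* Values from axg_32k_div_table, fed by an assumed 24 MHz xtal */
	printf("%lu Hz\n", dualdiv_rate(24000000UL, 733, 8, 732, 11));
	/* prints "32767 Hz", i.e. the ~32.768 kHz suspend/CEC clock */
	return 0;
}
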
diff --git a/drivers/clk/meson/clk-dualdiv.h b/drivers/clk/meson/clk-dualdiv.h
new file mode 100644
index 000000000000..4aa939018012
--- /dev/null
+++ b/drivers/clk/meson/clk-dualdiv.h
@@ -0,0 +1,33 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_DUALDIV_H
8#define __MESON_CLK_DUALDIV_H
9
10#include <linux/clk-provider.h>
11#include "parm.h"
12
13struct meson_clk_dualdiv_param {
14 unsigned int n1;
15 unsigned int n2;
16 unsigned int m1;
17 unsigned int m2;
18 unsigned int dual;
19};
20
21struct meson_clk_dualdiv_data {
22 struct parm n1;
23 struct parm n2;
24 struct parm m1;
25 struct parm m2;
26 struct parm dual;
27 const struct meson_clk_dualdiv_param *table;
28};
29
30extern const struct clk_ops meson_clk_dualdiv_ops;
31extern const struct clk_ops meson_clk_dualdiv_ro_ops;
32
33#endif /* __MESON_CLK_DUALDIV_H */
diff --git a/drivers/clk/meson/clk-input.c b/drivers/clk/meson/clk-input.c
index 06b3e3bb6a66..086226e9dba6 100644
--- a/drivers/clk/meson/clk-input.c
+++ b/drivers/clk/meson/clk-input.c
@@ -7,7 +7,8 @@
7#include <linux/clk.h> 7#include <linux/clk.h>
8#include <linux/clk-provider.h> 8#include <linux/clk-provider.h>
9#include <linux/device.h> 9#include <linux/device.h>
10#include "clkc.h" 10#include <linux/module.h>
11#include "clk-input.h"
11 12
12static const struct clk_ops meson_clk_no_ops = {}; 13static const struct clk_ops meson_clk_no_ops = {};
13 14
@@ -42,3 +43,7 @@ struct clk_hw *meson_clk_hw_register_input(struct device *dev,
42 return ret ? ERR_PTR(ret) : hw; 43 return ret ? ERR_PTR(ret) : hw;
43} 44}
44EXPORT_SYMBOL_GPL(meson_clk_hw_register_input); 45EXPORT_SYMBOL_GPL(meson_clk_hw_register_input);
46
47MODULE_DESCRIPTION("Amlogic clock input helper");
48MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
49MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-input.h b/drivers/clk/meson/clk-input.h
new file mode 100644
index 000000000000..4a541b9685a6
--- /dev/null
+++ b/drivers/clk/meson/clk-input.h
@@ -0,0 +1,19 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_INPUT_H
8#define __MESON_CLK_INPUT_H
9
10#include <linux/clk-provider.h>
11
12struct device;
13
14struct clk_hw *meson_clk_hw_register_input(struct device *dev,
15 const char *of_name,
16 const char *clk_name,
17 unsigned long flags);
18
19#endif /* __MESON_CLK_INPUT_H */
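
For context, the helper declared above is what backs the IN_PREFIX names used throughout this series: each DT input clock is registered as an internal pass-through clock whose name the regmap clocks can then list as a parent. The controller cores presumably iterate their input tables and make calls along these lines; the "dev" pointer is illustrative and the "ao-in-" prefix is the one defined by the AO drivers in this diff.

/* Make the DT clock input "xtal" visible internally as "ao-in-xtal",
 * so clk_regmap parents written as IN_PREFIX "xtal" resolve to it. */
struct clk_hw *hw = meson_clk_hw_register_input(dev, "xtal", "ao-in-xtal", 0);

if (IS_ERR(hw))
	return PTR_ERR(hw);
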
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 650f75cc15a9..f76850d99e59 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -12,7 +12,11 @@
12 */ 12 */
13 13
14#include <linux/clk-provider.h> 14#include <linux/clk-provider.h>
15#include "clkc.h" 15#include <linux/module.h>
16#include <linux/spinlock.h>
17
18#include "clk-regmap.h"
19#include "clk-mpll.h"
16 20
17#define SDM_DEN 16384 21#define SDM_DEN 16384
18#define N2_MIN 4 22#define N2_MIN 4
@@ -138,9 +142,15 @@ const struct clk_ops meson_clk_mpll_ro_ops = {
138 .recalc_rate = mpll_recalc_rate, 142 .recalc_rate = mpll_recalc_rate,
139 .round_rate = mpll_round_rate, 143 .round_rate = mpll_round_rate,
140}; 144};
145EXPORT_SYMBOL_GPL(meson_clk_mpll_ro_ops);
141 146
142const struct clk_ops meson_clk_mpll_ops = { 147const struct clk_ops meson_clk_mpll_ops = {
143 .recalc_rate = mpll_recalc_rate, 148 .recalc_rate = mpll_recalc_rate,
144 .round_rate = mpll_round_rate, 149 .round_rate = mpll_round_rate,
145 .set_rate = mpll_set_rate, 150 .set_rate = mpll_set_rate,
146}; 151};
152EXPORT_SYMBOL_GPL(meson_clk_mpll_ops);
153
154MODULE_DESCRIPTION("Amlogic MPLL driver");
155MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>");
156MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h
new file mode 100644
index 000000000000..cf79340006dd
--- /dev/null
+++ b/drivers/clk/meson/clk-mpll.h
@@ -0,0 +1,30 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_MPLL_H
8#define __MESON_CLK_MPLL_H
9
10#include <linux/clk-provider.h>
11#include <linux/spinlock.h>
12
13#include "parm.h"
14
15struct meson_clk_mpll_data {
16 struct parm sdm;
17 struct parm sdm_en;
18 struct parm n2;
19 struct parm ssen;
20 struct parm misc;
21 spinlock_t *lock;
22 u8 flags;
23};
24
25#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
26
27extern const struct clk_ops meson_clk_mpll_ro_ops;
28extern const struct clk_ops meson_clk_mpll_ops;
29
30#endif /* __MESON_CLK_MPLL_H */
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index cba43748ce3d..80c3ada193a4 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -5,7 +5,10 @@
5 */ 5 */
6 6
7#include <linux/clk-provider.h> 7#include <linux/clk-provider.h>
8#include "clkc.h" 8#include <linux/module.h>
9
10#include "clk-regmap.h"
11#include "clk-phase.h"
9 12
10#define phase_step(_width) (360 / (1 << (_width))) 13#define phase_step(_width) (360 / (1 << (_width)))
11 14
@@ -15,13 +18,12 @@ meson_clk_phase_data(struct clk_regmap *clk)
15 return (struct meson_clk_phase_data *)clk->data; 18 return (struct meson_clk_phase_data *)clk->data;
16} 19}
17 20
18int meson_clk_degrees_from_val(unsigned int val, unsigned int width) 21static int meson_clk_degrees_from_val(unsigned int val, unsigned int width)
19{ 22{
20 return phase_step(width) * val; 23 return phase_step(width) * val;
21} 24}
22EXPORT_SYMBOL_GPL(meson_clk_degrees_from_val);
23 25
24unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width) 26static unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
25{ 27{
26 unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width)); 28 unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width));
27 29
@@ -31,7 +33,6 @@ unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
31 */ 33 */
32 return val % (1 << width); 34 return val % (1 << width);
33} 35}
34EXPORT_SYMBOL_GPL(meson_clk_degrees_to_val);
35 36
36static int meson_clk_phase_get_phase(struct clk_hw *hw) 37static int meson_clk_phase_get_phase(struct clk_hw *hw)
37{ 38{
@@ -61,3 +62,67 @@ const struct clk_ops meson_clk_phase_ops = {
61 .set_phase = meson_clk_phase_set_phase, 62 .set_phase = meson_clk_phase_set_phase,
62}; 63};
63EXPORT_SYMBOL_GPL(meson_clk_phase_ops); 64EXPORT_SYMBOL_GPL(meson_clk_phase_ops);
65
66/*
67 * This is a special clock for the audio controller.
68 * The phase of the mst_sclk clock output can be controlled independently
69 * for the outside world (ph0), the tdmout (ph1) and the tdmin (ph2).
70 * Controlling these 3 phases as just one makes things simpler and
71 * gives the same clock view to all the elements on the i2s bus.
72 * If necessary, we can still control the phase in the tdm block,
73 * which makes this independent control redundant.
74 */
75static inline struct meson_clk_triphase_data *
76meson_clk_triphase_data(struct clk_regmap *clk)
77{
78 return (struct meson_clk_triphase_data *)clk->data;
79}
80
81static void meson_clk_triphase_sync(struct clk_hw *hw)
82{
83 struct clk_regmap *clk = to_clk_regmap(hw);
84 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
85 unsigned int val;
86
87 /* Get phase 0 and sync it to phase 1 and 2 */
88 val = meson_parm_read(clk->map, &tph->ph0);
89 meson_parm_write(clk->map, &tph->ph1, val);
90 meson_parm_write(clk->map, &tph->ph2, val);
91}
92
93static int meson_clk_triphase_get_phase(struct clk_hw *hw)
94{
95 struct clk_regmap *clk = to_clk_regmap(hw);
96 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
97 unsigned int val;
98
99 /* Phase are in sync, reading phase 0 is enough */
100 val = meson_parm_read(clk->map, &tph->ph0);
101
102 return meson_clk_degrees_from_val(val, tph->ph0.width);
103}
104
105static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
106{
107 struct clk_regmap *clk = to_clk_regmap(hw);
108 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
109 unsigned int val;
110
111 val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
112 meson_parm_write(clk->map, &tph->ph0, val);
113 meson_parm_write(clk->map, &tph->ph1, val);
114 meson_parm_write(clk->map, &tph->ph2, val);
115
116 return 0;
117}
118
119const struct clk_ops meson_clk_triphase_ops = {
120 .init = meson_clk_triphase_sync,
121 .get_phase = meson_clk_triphase_get_phase,
122 .set_phase = meson_clk_triphase_set_phase,
123};
124EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
125
126MODULE_DESCRIPTION("Amlogic phase driver");
127MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
128MODULE_LICENSE("GPL v2");
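
A quick standalone check of the degree/register conversion performed by meson_clk_degrees_to_val() and meson_clk_degrees_from_val() above; the 3-bit field width and the 270 degree request are illustrative values only.

#include <stdio.h>

#define PHASE_STEP(width)	(360 / (1 << (width)))

/* Same rounding as the driver: closest step, wrapped into the field width */
static unsigned int degrees_to_val(int degrees, unsigned int width)
{
	unsigned int val = (degrees + PHASE_STEP(width) / 2) / PHASE_STEP(width);

	return val % (1 << width);
}

int main(void)
{
	unsigned int width = 3;			/* 8 steps of 45 degrees */
	unsigned int val = degrees_to_val(270, width);

	printf("val=%u -> %u degrees\n", val, PHASE_STEP(width) * val);
	/* prints "val=6 -> 270 degrees" */
	return 0;
}
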
diff --git a/drivers/clk/meson/clk-phase.h b/drivers/clk/meson/clk-phase.h
new file mode 100644
index 000000000000..5579f9ced142
--- /dev/null
+++ b/drivers/clk/meson/clk-phase.h
@@ -0,0 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_PHASE_H
8#define __MESON_CLK_PHASE_H
9
10#include <linux/clk-provider.h>
11#include "parm.h"
12
13struct meson_clk_phase_data {
14 struct parm ph;
15};
16
17struct meson_clk_triphase_data {
18 struct parm ph0;
19 struct parm ph1;
20 struct parm ph2;
21};
22
23extern const struct clk_ops meson_clk_phase_ops;
24extern const struct clk_ops meson_clk_triphase_ops;
25
26#endif /* __MESON_CLK_PHASE_H */
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index afffc1547e20..41e16dd7272a 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -32,11 +32,10 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/math64.h> 33#include <linux/math64.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/of_address.h> 35#include <linux/rational.h>
36#include <linux/slab.h>
37#include <linux/string.h>
38 36
39#include "clkc.h" 37#include "clk-regmap.h"
38#include "clk-pll.h"
40 39
41static inline struct meson_clk_pll_data * 40static inline struct meson_clk_pll_data *
42meson_clk_pll_data(struct clk_regmap *clk) 41meson_clk_pll_data(struct clk_regmap *clk)
@@ -44,12 +43,21 @@ meson_clk_pll_data(struct clk_regmap *clk)
44 return (struct meson_clk_pll_data *)clk->data; 43 return (struct meson_clk_pll_data *)clk->data;
45} 44}
46 45
46static int __pll_round_closest_mult(struct meson_clk_pll_data *pll)
47{
48 if ((pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) &&
49 !MESON_PARM_APPLICABLE(&pll->frac))
50 return 1;
51
52 return 0;
53}
54
47static unsigned long __pll_params_to_rate(unsigned long parent_rate, 55static unsigned long __pll_params_to_rate(unsigned long parent_rate,
48 const struct pll_params_table *pllt, 56 unsigned int m, unsigned int n,
49 u16 frac, 57 unsigned int frac,
50 struct meson_clk_pll_data *pll) 58 struct meson_clk_pll_data *pll)
51{ 59{
52 u64 rate = (u64)parent_rate * pllt->m; 60 u64 rate = (u64)parent_rate * m;
53 61
54 if (frac && MESON_PARM_APPLICABLE(&pll->frac)) { 62 if (frac && MESON_PARM_APPLICABLE(&pll->frac)) {
55 u64 frac_rate = (u64)parent_rate * frac; 63 u64 frac_rate = (u64)parent_rate * frac;
@@ -58,7 +66,7 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate,
58 (1 << pll->frac.width)); 66 (1 << pll->frac.width));
59 } 67 }
60 68
61 return DIV_ROUND_UP_ULL(rate, pllt->n); 69 return DIV_ROUND_UP_ULL(rate, n);
62} 70}
63 71
64static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, 72static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
@@ -66,35 +74,39 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
66{ 74{
67 struct clk_regmap *clk = to_clk_regmap(hw); 75 struct clk_regmap *clk = to_clk_regmap(hw);
68 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); 76 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
69 struct pll_params_table pllt; 77 unsigned int m, n, frac;
70 u16 frac;
71 78
72 pllt.n = meson_parm_read(clk->map, &pll->n); 79 n = meson_parm_read(clk->map, &pll->n);
73 pllt.m = meson_parm_read(clk->map, &pll->m); 80 m = meson_parm_read(clk->map, &pll->m);
74 81
75 frac = MESON_PARM_APPLICABLE(&pll->frac) ? 82 frac = MESON_PARM_APPLICABLE(&pll->frac) ?
76 meson_parm_read(clk->map, &pll->frac) : 83 meson_parm_read(clk->map, &pll->frac) :
77 0; 84 0;
78 85
79 return __pll_params_to_rate(parent_rate, &pllt, frac, pll); 86 return __pll_params_to_rate(parent_rate, m, n, frac, pll);
80} 87}
81 88
82static u16 __pll_params_with_frac(unsigned long rate, 89static unsigned int __pll_params_with_frac(unsigned long rate,
83 unsigned long parent_rate, 90 unsigned long parent_rate,
84 const struct pll_params_table *pllt, 91 unsigned int m,
85 struct meson_clk_pll_data *pll) 92 unsigned int n,
93 struct meson_clk_pll_data *pll)
86{ 94{
87 u16 frac_max = (1 << pll->frac.width); 95 unsigned int frac_max = (1 << pll->frac.width);
88 u64 val = (u64)rate * pllt->n; 96 u64 val = (u64)rate * n;
97
98 /* Bail out if we are already over the requested rate */
99 if (rate < parent_rate * m / n)
100 return 0;
89 101
90 if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) 102 if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST)
91 val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate); 103 val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate);
92 else 104 else
93 val = div_u64(val * frac_max, parent_rate); 105 val = div_u64(val * frac_max, parent_rate);
94 106
95 val -= pllt->m * frac_max; 107 val -= m * frac_max;
96 108
97 return min((u16)val, (u16)(frac_max - 1)); 109 return min((unsigned int)val, (frac_max - 1));
98} 110}
99 111
100static bool meson_clk_pll_is_better(unsigned long rate, 112static bool meson_clk_pll_is_better(unsigned long rate,
@@ -102,45 +114,123 @@ static bool meson_clk_pll_is_better(unsigned long rate,
102 unsigned long now, 114 unsigned long now,
103 struct meson_clk_pll_data *pll) 115 struct meson_clk_pll_data *pll)
104{ 116{
105 if (!(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) || 117 if (__pll_round_closest_mult(pll)) {
106 MESON_PARM_APPLICABLE(&pll->frac)) {
107 /* Round down */
108 if (now < rate && best < now)
109 return true;
110 } else {
111 /* Round Closest */ 118 /* Round Closest */
112 if (abs(now - rate) < abs(best - rate)) 119 if (abs(now - rate) < abs(best - rate))
113 return true; 120 return true;
121 } else {
122 /* Round down */
123 if (now < rate && best < now)
124 return true;
114 } 125 }
115 126
116 return false; 127 return false;
117} 128}
118 129
119static const struct pll_params_table * 130static int meson_clk_get_pll_table_index(unsigned int index,
120meson_clk_get_pll_settings(unsigned long rate, 131 unsigned int *m,
121 unsigned long parent_rate, 132 unsigned int *n,
122 struct meson_clk_pll_data *pll) 133 struct meson_clk_pll_data *pll)
123{ 134{
124 const struct pll_params_table *table = pll->table; 135 if (!pll->table[index].n)
125 unsigned long best = 0, now = 0; 136 return -EINVAL;
126 unsigned int i, best_i = 0; 137
138 *m = pll->table[index].m;
139 *n = pll->table[index].n;
140
141 return 0;
142}
143
144static unsigned int meson_clk_get_pll_range_m(unsigned long rate,
145 unsigned long parent_rate,
146 unsigned int n,
147 struct meson_clk_pll_data *pll)
148{
149 u64 val = (u64)rate * n;
127 150
128 if (!table) 151 if (__pll_round_closest_mult(pll))
129 return NULL; 152 return DIV_ROUND_CLOSEST_ULL(val, parent_rate);
130 153
131 for (i = 0; table[i].n; i++) { 154 return div_u64(val, parent_rate);
132 now = __pll_params_to_rate(parent_rate, &table[i], 0, pll); 155}
133 156
134 /* If we get an exact match, don't bother any further */ 157static int meson_clk_get_pll_range_index(unsigned long rate,
135 if (now == rate) { 158 unsigned long parent_rate,
136 return &table[i]; 159 unsigned int index,
137 } else if (meson_clk_pll_is_better(rate, best, now, pll)) { 160 unsigned int *m,
161 unsigned int *n,
162 struct meson_clk_pll_data *pll)
163{
164 *n = index + 1;
165
166 /* Check the predivider range */
167 if (*n >= (1 << pll->n.width))
168 return -EINVAL;
169
170 if (*n == 1) {
171 /* Get the boundaries out of the way */
172 if (rate <= pll->range->min * parent_rate) {
173 *m = pll->range->min;
174 return -ENODATA;
175 } else if (rate >= pll->range->max * parent_rate) {
176 *m = pll->range->max;
177 return -ENODATA;
178 }
179 }
180
181 *m = meson_clk_get_pll_range_m(rate, parent_rate, *n, pll);
182
183 /* this pre-divider value would need a multiplier that is too big - stop */
184 if (*m >= (1 << pll->m.width))
185 return -EINVAL;
186
187 return 0;
188}
189
190static int meson_clk_get_pll_get_index(unsigned long rate,
191 unsigned long parent_rate,
192 unsigned int index,
193 unsigned int *m,
194 unsigned int *n,
195 struct meson_clk_pll_data *pll)
196{
197 if (pll->range)
198 return meson_clk_get_pll_range_index(rate, parent_rate,
199 index, m, n, pll);
200 else if (pll->table)
201 return meson_clk_get_pll_table_index(index, m, n, pll);
202
203 return -EINVAL;
204}
205
206static int meson_clk_get_pll_settings(unsigned long rate,
207 unsigned long parent_rate,
208 unsigned int *best_m,
209 unsigned int *best_n,
210 struct meson_clk_pll_data *pll)
211{
212 unsigned long best = 0, now = 0;
213 unsigned int i, m, n;
214 int ret;
215
216 for (i = 0, ret = 0; !ret; i++) {
217 ret = meson_clk_get_pll_get_index(rate, parent_rate,
218 i, &m, &n, pll);
219 if (ret == -EINVAL)
220 break;
221
222 now = __pll_params_to_rate(parent_rate, m, n, 0, pll);
223 if (meson_clk_pll_is_better(rate, best, now, pll)) {
138 best = now; 224 best = now;
139 best_i = i; 225 *best_m = m;
226 *best_n = n;
227
228 if (now == rate)
229 break;
140 } 230 }
141 } 231 }
142 232
143 return (struct pll_params_table *)&table[best_i]; 233 return best ? 0 : -EINVAL;
144} 234}
145 235
146static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, 236static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -148,15 +238,15 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
148{ 238{
149 struct clk_regmap *clk = to_clk_regmap(hw); 239 struct clk_regmap *clk = to_clk_regmap(hw);
150 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); 240 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
151 const struct pll_params_table *pllt = 241 unsigned int m, n, frac;
152 meson_clk_get_pll_settings(rate, *parent_rate, pll);
153 unsigned long round; 242 unsigned long round;
154 u16 frac; 243 int ret;
155 244
156 if (!pllt) 245 ret = meson_clk_get_pll_settings(rate, *parent_rate, &m, &n, pll);
246 if (ret)
157 return meson_clk_pll_recalc_rate(hw, *parent_rate); 247 return meson_clk_pll_recalc_rate(hw, *parent_rate);
158 248
159 round = __pll_params_to_rate(*parent_rate, pllt, 0, pll); 249 round = __pll_params_to_rate(*parent_rate, m, n, 0, pll);
160 250
161 if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round) 251 if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round)
162 return round; 252 return round;
@@ -165,9 +255,9 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
165 * The rate provided by the setting is not an exact match, let's 255 * The rate provided by the setting is not an exact match, let's
166 * try to improve the result using the fractional parameter 256 * try to improve the result using the fractional parameter
167 */ 257 */
168 frac = __pll_params_with_frac(rate, *parent_rate, pllt, pll); 258 frac = __pll_params_with_frac(rate, *parent_rate, m, n, pll);
169 259
170 return __pll_params_to_rate(*parent_rate, pllt, frac, pll); 260 return __pll_params_to_rate(*parent_rate, m, n, frac, pll);
171} 261}
172 262
173static int meson_clk_pll_wait_lock(struct clk_hw *hw) 263static int meson_clk_pll_wait_lock(struct clk_hw *hw)
@@ -254,30 +344,27 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
254{ 344{
255 struct clk_regmap *clk = to_clk_regmap(hw); 345 struct clk_regmap *clk = to_clk_regmap(hw);
256 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); 346 struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
257 const struct pll_params_table *pllt; 347 unsigned int enabled, m, n, frac = 0, ret;
258 unsigned int enabled;
259 unsigned long old_rate; 348 unsigned long old_rate;
260 u16 frac = 0;
261 349
262 if (parent_rate == 0 || rate == 0) 350 if (parent_rate == 0 || rate == 0)
263 return -EINVAL; 351 return -EINVAL;
264 352
265 old_rate = rate; 353 old_rate = rate;
266 354
267 pllt = meson_clk_get_pll_settings(rate, parent_rate, pll); 355 ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
268 if (!pllt) 356 if (ret)
269 return -EINVAL; 357 return ret;
270 358
271 enabled = meson_parm_read(clk->map, &pll->en); 359 enabled = meson_parm_read(clk->map, &pll->en);
272 if (enabled) 360 if (enabled)
273 meson_clk_pll_disable(hw); 361 meson_clk_pll_disable(hw);
274 362
275 meson_parm_write(clk->map, &pll->n, pllt->n); 363 meson_parm_write(clk->map, &pll->n, n);
276 meson_parm_write(clk->map, &pll->m, pllt->m); 364 meson_parm_write(clk->map, &pll->m, m);
277
278 365
279 if (MESON_PARM_APPLICABLE(&pll->frac)) { 366 if (MESON_PARM_APPLICABLE(&pll->frac)) {
280 frac = __pll_params_with_frac(rate, parent_rate, pllt, pll); 367 frac = __pll_params_with_frac(rate, parent_rate, m, n, pll);
281 meson_parm_write(clk->map, &pll->frac, frac); 368 meson_parm_write(clk->map, &pll->frac, frac);
282 } 369 }
283 370
@@ -309,8 +396,15 @@ const struct clk_ops meson_clk_pll_ops = {
309 .enable = meson_clk_pll_enable, 396 .enable = meson_clk_pll_enable,
310 .disable = meson_clk_pll_disable 397 .disable = meson_clk_pll_disable
311}; 398};
399EXPORT_SYMBOL_GPL(meson_clk_pll_ops);
312 400
313const struct clk_ops meson_clk_pll_ro_ops = { 401const struct clk_ops meson_clk_pll_ro_ops = {
314 .recalc_rate = meson_clk_pll_recalc_rate, 402 .recalc_rate = meson_clk_pll_recalc_rate,
315 .is_enabled = meson_clk_pll_is_enabled, 403 .is_enabled = meson_clk_pll_is_enabled,
316}; 404};
405EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops);
406
407MODULE_DESCRIPTION("Amlogic PLL driver");
408MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
409MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
410MODULE_LICENSE("GPL v2");
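
To illustrate the fixed plus fractional rate computation that __pll_params_to_rate() and __pll_params_with_frac() implement above, a minimal user-space sketch follows; the 24 MHz parent, the m/n pair and the 12-bit fractional width are illustrative values, not taken from a particular PLL in this diff.

#include <stdio.h>

/* Stand-alone rework of __pll_params_to_rate():
 * rate = parent * (m + frac / 2^frac_width) / n, rounded up. */
static unsigned long long pll_rate(unsigned long long parent,
				   unsigned int m, unsigned int n,
				   unsigned int frac, unsigned int frac_width)
{
	unsigned long long rate = parent * m;

	if (frac)
		rate += (parent * frac + (1ULL << frac_width) - 1) >>
			frac_width;

	return (rate + n - 1) / n;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* Illustrative: 24 MHz xtal, m = 62, n = 1, 12-bit frac field */
	printf("integer only: %llu Hz\n", pll_rate(24000000, 62, 1, 0, 12));
	printf("with frac=1024: %llu Hz\n", pll_rate(24000000, 62, 1, 1024, 12));
	/* prints 1488000000 Hz, then 1494000000 Hz: frac adds 24 MHz * 1024/4096 */
	return 0;
}
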
diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h
new file mode 100644
index 000000000000..55af2e285b1b
--- /dev/null
+++ b/drivers/clk/meson/clk-pll.h
@@ -0,0 +1,49 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLK_PLL_H
8#define __MESON_CLK_PLL_H
9
10#include <linux/clk-provider.h>
11#include <linux/regmap.h>
12#include "parm.h"
13
14struct pll_params_table {
15 unsigned int m;
16 unsigned int n;
17};
18
19struct pll_mult_range {
20 unsigned int min;
21 unsigned int max;
22};
23
24#define PLL_PARAMS(_m, _n) \
25 { \
26 .m = (_m), \
27 .n = (_n), \
28 }
29
30#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
31
32struct meson_clk_pll_data {
33 struct parm en;
34 struct parm m;
35 struct parm n;
36 struct parm frac;
37 struct parm l;
38 struct parm rst;
39 const struct reg_sequence *init_regs;
40 unsigned int init_count;
41 const struct pll_params_table *table;
42 const struct pll_mult_range *range;
43 u8 flags;
44};
45
46extern const struct clk_ops meson_clk_pll_ro_ops;
47extern const struct clk_ops meson_clk_pll_ops;
48
49#endif /* __MESON_CLK_PLL_H */
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index c515f67322a3..dcd1757cc5df 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -4,6 +4,7 @@
4 * Author: Jerome Brunet <jbrunet@baylibre.com> 4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */ 5 */
6 6
7#include <linux/module.h>
7#include "clk-regmap.h" 8#include "clk-regmap.h"
8 9
9static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable) 10static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable)
@@ -180,3 +181,7 @@ const struct clk_ops clk_regmap_mux_ro_ops = {
180 .get_parent = clk_regmap_mux_get_parent, 181 .get_parent = clk_regmap_mux_get_parent,
181}; 182};
182EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops); 183EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops);
184
185MODULE_DESCRIPTION("Amlogic regmap backed clock driver");
186MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
187MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index e9c5728d40eb..1dd0abe3ba91 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -111,4 +111,24 @@ clk_get_regmap_mux_data(struct clk_regmap *clk)
111extern const struct clk_ops clk_regmap_mux_ops; 111extern const struct clk_ops clk_regmap_mux_ops;
112extern const struct clk_ops clk_regmap_mux_ro_ops; 112extern const struct clk_ops clk_regmap_mux_ro_ops;
113 113
114#define __MESON_GATE(_name, _reg, _bit, _ops) \
115struct clk_regmap _name = { \
116 .data = &(struct clk_regmap_gate_data){ \
117 .offset = (_reg), \
118 .bit_idx = (_bit), \
119 }, \
120 .hw.init = &(struct clk_init_data) { \
121 .name = #_name, \
122 .ops = _ops, \
123 .parent_names = (const char *[]){ "clk81" }, \
124 .num_parents = 1, \
125 .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
126 }, \
127}
128
129#define MESON_GATE(_name, _reg, _bit) \
130 __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ops)
131
132#define MESON_GATE_RO(_name, _reg, _bit) \
133 __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ro_ops)
114#endif /* __CLK_REGMAP_H */ 134#endif /* __CLK_REGMAP_H */
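
For readers new to these helpers, this is what the MESON_GATE() macro added above expands to when written out by hand; the gate name, register offset and bit index are purely illustrative.

/* MESON_GATE(foo_gate, 0x50, 3); expands (via __MESON_GATE) to: */
struct clk_regmap foo_gate = {
	.data = &(struct clk_regmap_gate_data){
		.offset = (0x50),
		.bit_idx = (3),
	},
	.hw.init = &(struct clk_init_data) {
		.name = "foo_gate",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "clk81" },
		.num_parents = 1,
		.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
	},
};
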
diff --git a/drivers/clk/meson/clk-triphase.c b/drivers/clk/meson/clk-triphase.c
deleted file mode 100644
index 4a59936251e5..000000000000
--- a/drivers/clk/meson/clk-triphase.c
+++ /dev/null
@@ -1,68 +0,0 @@
1// SPDX-License-Identifier: (GPL-2.0 OR MIT)
2/*
3 * Copyright (c) 2018 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#include <linux/clk-provider.h>
8#include "clkc-audio.h"
9
10/*
11 * This is a special clock for the audio controller.
12 * The phase of mst_sclk clock output can be controlled independently
13 * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2).
14 * Controlling these 3 phases as just one makes things simpler and
15 * give the same clock view to all the element on the i2s bus.
16 * If necessary, we can still control the phase in the tdm block
17 * which makes these independent control redundant.
18 */
19static inline struct meson_clk_triphase_data *
20meson_clk_triphase_data(struct clk_regmap *clk)
21{
22 return (struct meson_clk_triphase_data *)clk->data;
23}
24
25static void meson_clk_triphase_sync(struct clk_hw *hw)
26{
27 struct clk_regmap *clk = to_clk_regmap(hw);
28 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
29 unsigned int val;
30
31 /* Get phase 0 and sync it to phase 1 and 2 */
32 val = meson_parm_read(clk->map, &tph->ph0);
33 meson_parm_write(clk->map, &tph->ph1, val);
34 meson_parm_write(clk->map, &tph->ph2, val);
35}
36
37static int meson_clk_triphase_get_phase(struct clk_hw *hw)
38{
39 struct clk_regmap *clk = to_clk_regmap(hw);
40 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
41 unsigned int val;
42
43 /* Phase are in sync, reading phase 0 is enough */
44 val = meson_parm_read(clk->map, &tph->ph0);
45
46 return meson_clk_degrees_from_val(val, tph->ph0.width);
47}
48
49static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
50{
51 struct clk_regmap *clk = to_clk_regmap(hw);
52 struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
53 unsigned int val;
54
55 val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
56 meson_parm_write(clk->map, &tph->ph0, val);
57 meson_parm_write(clk->map, &tph->ph1, val);
58 meson_parm_write(clk->map, &tph->ph2, val);
59
60 return 0;
61}
62
63const struct clk_ops meson_clk_triphase_ops = {
64 .init = meson_clk_triphase_sync,
65 .get_phase = meson_clk_triphase_get_phase,
66 .set_phase = meson_clk_triphase_set_phase,
67};
68EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
deleted file mode 100644
index 6183b22c4bf2..000000000000
--- a/drivers/clk/meson/clkc.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2015 Endless Mobile, Inc.
4 * Author: Carlo Caione <carlo@endlessm.com>
5 */
6
7#ifndef __CLKC_H
8#define __CLKC_H
9
10#include <linux/clk-provider.h>
11#include "clk-regmap.h"
12
13#define PMASK(width) GENMASK(width - 1, 0)
14#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
15#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
16
17#define PARM_GET(width, shift, reg) \
18 (((reg) & SETPMASK(width, shift)) >> (shift))
19#define PARM_SET(width, shift, reg, val) \
20 (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))
21
22#define MESON_PARM_APPLICABLE(p) (!!((p)->width))
23
24struct parm {
25 u16 reg_off;
26 u8 shift;
27 u8 width;
28};
29
30static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
31{
32 unsigned int val;
33
34 regmap_read(map, p->reg_off, &val);
35 return PARM_GET(p->width, p->shift, val);
36}
37
38static inline void meson_parm_write(struct regmap *map, struct parm *p,
39 unsigned int val)
40{
41 regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
42 val << p->shift);
43}
44
45
46struct pll_params_table {
47 u16 m;
48 u16 n;
49};
50
51#define PLL_PARAMS(_m, _n) \
52 { \
53 .m = (_m), \
54 .n = (_n), \
55 }
56
57#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
58
59struct meson_clk_pll_data {
60 struct parm en;
61 struct parm m;
62 struct parm n;
63 struct parm frac;
64 struct parm l;
65 struct parm rst;
66 const struct reg_sequence *init_regs;
67 unsigned int init_count;
68 const struct pll_params_table *table;
69 u8 flags;
70};
71
72#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
73
74struct meson_clk_mpll_data {
75 struct parm sdm;
76 struct parm sdm_en;
77 struct parm n2;
78 struct parm ssen;
79 struct parm misc;
80 spinlock_t *lock;
81 u8 flags;
82};
83
84#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
85
86struct meson_clk_phase_data {
87 struct parm ph;
88};
89
90int meson_clk_degrees_from_val(unsigned int val, unsigned int width);
91unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width);
92
93struct meson_vid_pll_div_data {
94 struct parm val;
95 struct parm sel;
96};
97
98#define MESON_GATE(_name, _reg, _bit) \
99struct clk_regmap _name = { \
100 .data = &(struct clk_regmap_gate_data){ \
101 .offset = (_reg), \
102 .bit_idx = (_bit), \
103 }, \
104 .hw.init = &(struct clk_init_data) { \
105 .name = #_name, \
106 .ops = &clk_regmap_gate_ops, \
107 .parent_names = (const char *[]){ "clk81" }, \
108 .num_parents = 1, \
109 .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
110 }, \
111};
112
113/* clk_ops */
114extern const struct clk_ops meson_clk_pll_ro_ops;
115extern const struct clk_ops meson_clk_pll_ops;
116extern const struct clk_ops meson_clk_cpu_ops;
117extern const struct clk_ops meson_clk_mpll_ro_ops;
118extern const struct clk_ops meson_clk_mpll_ops;
119extern const struct clk_ops meson_clk_phase_ops;
120extern const struct clk_ops meson_vid_pll_div_ro_ops;
121
122struct clk_hw *meson_clk_hw_register_input(struct device *dev,
123 const char *of_name,
124 const char *clk_name,
125 unsigned long flags);
126
127#endif /* __CLKC_H */
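
The PARM_GET()/PARM_SET() helpers removed with clkc.h back the meson_parm_read()/meson_parm_write() calls used throughout the new drivers; they presumably now come from the parm.h header that the new per-driver headers include. A small user-space sketch of the extraction they perform; the register value and field layout below are illustrative.

#include <stdio.h>

/* User-space copies of the bitfield helpers (GENMASK as in the kernel) */
#define GENMASK(h, l)		(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define SETPMASK(width, shift)	GENMASK((shift) + (width) - 1, (shift))
#define PARM_GET(width, shift, reg) \
	(((reg) & SETPMASK(width, shift)) >> (shift))

int main(void)
{
	/* Illustrative layout: 9-bit 'm' field at bit 0, 5-bit 'n' field at bit 10 */
	unsigned int reg = 0x0000043e;

	printf("m = %u\n", PARM_GET(9, 0, reg));	/* m = 62 */
	printf("n = %u\n", PARM_GET(5, 10, reg));	/* n = 1 */
	return 0;
}
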
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
new file mode 100644
index 000000000000..1994e735396b
--- /dev/null
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -0,0 +1,454 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Amlogic Meson-G12A Always-On (AO) Clock Controller Driver
4 *
5 * Copyright (c) 2016 Baylibre SAS.
6 * Author: Michael Turquette <mturquette@baylibre.com>
7 *
8 * Copyright (c) 2019 Baylibre SAS.
9 * Author: Neil Armstrong <narmstrong@baylibre.com>
10 */
11#include <linux/clk-provider.h>
12#include <linux/platform_device.h>
13#include <linux/reset-controller.h>
14#include <linux/mfd/syscon.h>
15#include "meson-aoclk.h"
16#include "g12a-aoclk.h"
17
18#include "clk-regmap.h"
19#include "clk-dualdiv.h"
20
21#define IN_PREFIX "ao-in-"
22
23/*
24 * AO Configuration Clock registers offsets
25 * Register offsets from the data sheet must be multiplied by 4.
26 */
27#define AO_RTI_STATUS_REG3 0x0C
28#define AO_RTI_PWR_CNTL_REG0 0x10
29#define AO_RTI_GEN_CNTL_REG0 0x40
30#define AO_CLK_GATE0 0x4c
31#define AO_CLK_GATE0_SP 0x50
32#define AO_OSCIN_CNTL 0x58
33#define AO_CEC_CLK_CNTL_REG0 0x74
34#define AO_CEC_CLK_CNTL_REG1 0x78
35#define AO_SAR_CLK 0x90
36#define AO_RTC_ALT_CLK_CNTL0 0x94
37#define AO_RTC_ALT_CLK_CNTL1 0x98
38
39/*
40 * Like every other peripheral clock gate in Amlogic Clock drivers,
41 * we are using CLK_IGNORE_UNUSED here, so we keep the state set by the
42 * bootloader. The goal is to remove this flag at some point.
43 * Actually removing it will require extensive testing before it can be done safely.
44 */
45#define AXG_AO_GATE(_name, _reg, _bit) \
46static struct clk_regmap g12a_aoclk_##_name = { \
47 .data = &(struct clk_regmap_gate_data) { \
48 .offset = (_reg), \
49 .bit_idx = (_bit), \
50 }, \
51 .hw.init = &(struct clk_init_data) { \
52 .name = "g12a_ao_" #_name, \
53 .ops = &clk_regmap_gate_ops, \
54 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
55 .num_parents = 1, \
56 .flags = CLK_IGNORE_UNUSED, \
57 }, \
58}
59
60AXG_AO_GATE(ahb, AO_CLK_GATE0, 0);
61AXG_AO_GATE(ir_in, AO_CLK_GATE0, 1);
62AXG_AO_GATE(i2c_m0, AO_CLK_GATE0, 2);
63AXG_AO_GATE(i2c_s0, AO_CLK_GATE0, 3);
64AXG_AO_GATE(uart, AO_CLK_GATE0, 4);
65AXG_AO_GATE(prod_i2c, AO_CLK_GATE0, 5);
66AXG_AO_GATE(uart2, AO_CLK_GATE0, 6);
67AXG_AO_GATE(ir_out, AO_CLK_GATE0, 7);
68AXG_AO_GATE(saradc, AO_CLK_GATE0, 8);
69AXG_AO_GATE(mailbox, AO_CLK_GATE0_SP, 0);
70AXG_AO_GATE(m3, AO_CLK_GATE0_SP, 1);
71AXG_AO_GATE(ahb_sram, AO_CLK_GATE0_SP, 2);
72AXG_AO_GATE(rti, AO_CLK_GATE0_SP, 3);
73AXG_AO_GATE(m4_fclk, AO_CLK_GATE0_SP, 4);
74AXG_AO_GATE(m4_hclk, AO_CLK_GATE0_SP, 5);
75
76static struct clk_regmap g12a_aoclk_cts_oscin = {
77 .data = &(struct clk_regmap_gate_data){
78 .offset = AO_RTI_PWR_CNTL_REG0,
79 .bit_idx = 14,
80 },
81 .hw.init = &(struct clk_init_data){
82 .name = "cts_oscin",
83 .ops = &clk_regmap_gate_ro_ops,
84 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
85 .num_parents = 1,
86 },
87};
88
89static const struct meson_clk_dualdiv_param g12a_32k_div_table[] = {
90 {
91 .dual = 1,
92 .n1 = 733,
93 .m1 = 8,
94 .n2 = 732,
95 .m2 = 11,
96 }, {}
97};
98
99/* 32k_by_oscin clock */
100
101static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = {
102 .data = &(struct clk_regmap_gate_data){
103 .offset = AO_RTC_ALT_CLK_CNTL0,
104 .bit_idx = 31,
105 },
106 .hw.init = &(struct clk_init_data){
107 .name = "g12a_ao_32k_by_oscin_pre",
108 .ops = &clk_regmap_gate_ops,
109 .parent_names = (const char *[]){ "cts_oscin" },
110 .num_parents = 1,
111 },
112};
113
114static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
115 .data = &(struct meson_clk_dualdiv_data){
116 .n1 = {
117 .reg_off = AO_RTC_ALT_CLK_CNTL0,
118 .shift = 0,
119 .width = 12,
120 },
121 .n2 = {
122 .reg_off = AO_RTC_ALT_CLK_CNTL0,
123 .shift = 12,
124 .width = 12,
125 },
126 .m1 = {
127 .reg_off = AO_RTC_ALT_CLK_CNTL1,
128 .shift = 0,
129 .width = 12,
130 },
131 .m2 = {
132 .reg_off = AO_RTC_ALT_CLK_CNTL1,
133 .shift = 12,
134 .width = 12,
135 },
136 .dual = {
137 .reg_off = AO_RTC_ALT_CLK_CNTL0,
138 .shift = 28,
139 .width = 1,
140 },
141 .table = g12a_32k_div_table,
142 },
143 .hw.init = &(struct clk_init_data){
144 .name = "g12a_ao_32k_by_oscin_div",
145 .ops = &meson_clk_dualdiv_ops,
146 .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_pre" },
147 .num_parents = 1,
148 },
149};
150
151static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
152 .data = &(struct clk_regmap_mux_data) {
153 .offset = AO_RTC_ALT_CLK_CNTL1,
154 .mask = 0x1,
155 .shift = 24,
156 .flags = CLK_MUX_ROUND_CLOSEST,
157 },
158 .hw.init = &(struct clk_init_data){
159 .name = "g12a_ao_32k_by_oscin_sel",
160 .ops = &clk_regmap_mux_ops,
161 .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_div",
162 "g12a_ao_32k_by_oscin_pre" },
163 .num_parents = 2,
164 .flags = CLK_SET_RATE_PARENT,
165 },
166};
167
168static struct clk_regmap g12a_aoclk_32k_by_oscin = {
169 .data = &(struct clk_regmap_gate_data){
170 .offset = AO_RTC_ALT_CLK_CNTL0,
171 .bit_idx = 30,
172 },
173 .hw.init = &(struct clk_init_data){
174 .name = "g12a_ao_32k_by_oscin",
175 .ops = &clk_regmap_gate_ops,
176 .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_sel" },
177 .num_parents = 1,
178 .flags = CLK_SET_RATE_PARENT,
179 },
180};
181
182/* cec clock */
183
184static struct clk_regmap g12a_aoclk_cec_pre = {
185 .data = &(struct clk_regmap_gate_data){
186 .offset = AO_CEC_CLK_CNTL_REG0,
187 .bit_idx = 31,
188 },
189 .hw.init = &(struct clk_init_data){
190 .name = "g12a_ao_cec_pre",
191 .ops = &clk_regmap_gate_ops,
192 .parent_names = (const char *[]){ "cts_oscin" },
193 .num_parents = 1,
194 },
195};
196
197static struct clk_regmap g12a_aoclk_cec_div = {
198 .data = &(struct meson_clk_dualdiv_data){
199 .n1 = {
200 .reg_off = AO_CEC_CLK_CNTL_REG0,
201 .shift = 0,
202 .width = 12,
203 },
204 .n2 = {
205 .reg_off = AO_CEC_CLK_CNTL_REG0,
206 .shift = 12,
207 .width = 12,
208 },
209 .m1 = {
210 .reg_off = AO_CEC_CLK_CNTL_REG1,
211 .shift = 0,
212 .width = 12,
213 },
214 .m2 = {
215 .reg_off = AO_CEC_CLK_CNTL_REG1,
216 .shift = 12,
217 .width = 12,
218 },
219 .dual = {
220 .reg_off = AO_CEC_CLK_CNTL_REG0,
221 .shift = 28,
222 .width = 1,
223 },
224 .table = g12a_32k_div_table,
225 },
226 .hw.init = &(struct clk_init_data){
227 .name = "g12a_ao_cec_div",
228 .ops = &meson_clk_dualdiv_ops,
229 .parent_names = (const char *[]){ "g12a_ao_cec_pre" },
230 .num_parents = 1,
231 },
232};
233
234static struct clk_regmap g12a_aoclk_cec_sel = {
235 .data = &(struct clk_regmap_mux_data) {
236 .offset = AO_CEC_CLK_CNTL_REG1,
237 .mask = 0x1,
238 .shift = 24,
239 .flags = CLK_MUX_ROUND_CLOSEST,
240 },
241 .hw.init = &(struct clk_init_data){
242 .name = "g12a_ao_cec_sel",
243 .ops = &clk_regmap_mux_ops,
244 .parent_names = (const char *[]){ "g12a_ao_cec_div",
245 "g12a_ao_cec_pre" },
246 .num_parents = 2,
247 .flags = CLK_SET_RATE_PARENT,
248 },
249};
250
251static struct clk_regmap g12a_aoclk_cec = {
252 .data = &(struct clk_regmap_gate_data){
253 .offset = AO_CEC_CLK_CNTL_REG0,
254 .bit_idx = 30,
255 },
256 .hw.init = &(struct clk_init_data){
257 .name = "g12a_ao_cec",
258 .ops = &clk_regmap_gate_ops,
259 .parent_names = (const char *[]){ "g12a_ao_cec_sel" },
260 .num_parents = 1,
261 .flags = CLK_SET_RATE_PARENT,
262 },
263};
264
265static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
266 .data = &(struct clk_regmap_mux_data) {
267 .offset = AO_RTI_PWR_CNTL_REG0,
268 .mask = 0x1,
269 .shift = 10,
270 .flags = CLK_MUX_ROUND_CLOSEST,
271 },
272 .hw.init = &(struct clk_init_data){
273 .name = "g12a_ao_cts_rtc_oscin",
274 .ops = &clk_regmap_mux_ops,
275 .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin",
276 IN_PREFIX "ext_32k-0" },
277 .num_parents = 2,
278 .flags = CLK_SET_RATE_PARENT,
279 },
280};
281
282static struct clk_regmap g12a_aoclk_clk81 = {
283 .data = &(struct clk_regmap_mux_data) {
284 .offset = AO_RTI_PWR_CNTL_REG0,
285 .mask = 0x1,
286 .shift = 8,
287 .flags = CLK_MUX_ROUND_CLOSEST,
288 },
289 .hw.init = &(struct clk_init_data){
290 .name = "g12a_ao_clk81",
291 .ops = &clk_regmap_mux_ro_ops,
292 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
293 "g12a_ao_cts_rtc_oscin"},
294 .num_parents = 2,
295 .flags = CLK_SET_RATE_PARENT,
296 },
297};
298
299static struct clk_regmap g12a_aoclk_saradc_mux = {
300 .data = &(struct clk_regmap_mux_data) {
301 .offset = AO_SAR_CLK,
302 .mask = 0x3,
303 .shift = 9,
304 },
305 .hw.init = &(struct clk_init_data){
306 .name = "g12a_ao_saradc_mux",
307 .ops = &clk_regmap_mux_ops,
308 .parent_names = (const char *[]){ IN_PREFIX "xtal",
309 "g12a_ao_clk81" },
310 .num_parents = 2,
311 },
312};
313
314static struct clk_regmap g12a_aoclk_saradc_div = {
315 .data = &(struct clk_regmap_div_data) {
316 .offset = AO_SAR_CLK,
317 .shift = 0,
318 .width = 8,
319 },
320 .hw.init = &(struct clk_init_data){
321 .name = "g12a_ao_saradc_div",
322 .ops = &clk_regmap_divider_ops,
323 .parent_names = (const char *[]){ "g12a_ao_saradc_mux" },
324 .num_parents = 1,
325 .flags = CLK_SET_RATE_PARENT,
326 },
327};
328
329static struct clk_regmap g12a_aoclk_saradc_gate = {
330 .data = &(struct clk_regmap_gate_data) {
331 .offset = AO_SAR_CLK,
332 .bit_idx = 8,
333 },
334 .hw.init = &(struct clk_init_data){
335 .name = "g12a_ao_saradc_gate",
336 .ops = &clk_regmap_gate_ops,
337 .parent_names = (const char *[]){ "g12a_ao_saradc_div" },
338 .num_parents = 1,
339 .flags = CLK_SET_RATE_PARENT,
340 },
341};
342
343static const unsigned int g12a_aoclk_reset[] = {
344 [RESET_AO_IR_IN] = 16,
345 [RESET_AO_UART] = 17,
346 [RESET_AO_I2C_M] = 18,
347 [RESET_AO_I2C_S] = 19,
348 [RESET_AO_SAR_ADC] = 20,
349 [RESET_AO_UART2] = 22,
350 [RESET_AO_IR_OUT] = 23,
351};
352
353static struct clk_regmap *g12a_aoclk_regmap[] = {
354 &g12a_aoclk_ahb,
355 &g12a_aoclk_ir_in,
356 &g12a_aoclk_i2c_m0,
357 &g12a_aoclk_i2c_s0,
358 &g12a_aoclk_uart,
359 &g12a_aoclk_prod_i2c,
360 &g12a_aoclk_uart2,
361 &g12a_aoclk_ir_out,
362 &g12a_aoclk_saradc,
363 &g12a_aoclk_mailbox,
364 &g12a_aoclk_m3,
365 &g12a_aoclk_ahb_sram,
366 &g12a_aoclk_rti,
367 &g12a_aoclk_m4_fclk,
368 &g12a_aoclk_m4_hclk,
369 &g12a_aoclk_cts_oscin,
370 &g12a_aoclk_32k_by_oscin_pre,
371 &g12a_aoclk_32k_by_oscin_div,
372 &g12a_aoclk_32k_by_oscin_sel,
373 &g12a_aoclk_32k_by_oscin,
374 &g12a_aoclk_cec_pre,
375 &g12a_aoclk_cec_div,
376 &g12a_aoclk_cec_sel,
377 &g12a_aoclk_cec,
378 &g12a_aoclk_cts_rtc_oscin,
379 &g12a_aoclk_clk81,
380 &g12a_aoclk_saradc_mux,
381 &g12a_aoclk_saradc_div,
382 &g12a_aoclk_saradc_gate,
383};
384
385static const struct clk_hw_onecell_data g12a_aoclk_onecell_data = {
386 .hws = {
387 [CLKID_AO_AHB] = &g12a_aoclk_ahb.hw,
388 [CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw,
389 [CLKID_AO_I2C_M0] = &g12a_aoclk_i2c_m0.hw,
390 [CLKID_AO_I2C_S0] = &g12a_aoclk_i2c_s0.hw,
391 [CLKID_AO_UART] = &g12a_aoclk_uart.hw,
392 [CLKID_AO_PROD_I2C] = &g12a_aoclk_prod_i2c.hw,
393 [CLKID_AO_UART2] = &g12a_aoclk_uart2.hw,
394 [CLKID_AO_IR_OUT] = &g12a_aoclk_ir_out.hw,
395 [CLKID_AO_SAR_ADC] = &g12a_aoclk_saradc.hw,
396 [CLKID_AO_MAILBOX] = &g12a_aoclk_mailbox.hw,
397 [CLKID_AO_M3] = &g12a_aoclk_m3.hw,
398 [CLKID_AO_AHB_SRAM] = &g12a_aoclk_ahb_sram.hw,
399 [CLKID_AO_RTI] = &g12a_aoclk_rti.hw,
400 [CLKID_AO_M4_FCLK] = &g12a_aoclk_m4_fclk.hw,
401 [CLKID_AO_M4_HCLK] = &g12a_aoclk_m4_hclk.hw,
402 [CLKID_AO_CLK81] = &g12a_aoclk_clk81.hw,
403 [CLKID_AO_SAR_ADC_SEL] = &g12a_aoclk_saradc_mux.hw,
404 [CLKID_AO_SAR_ADC_DIV] = &g12a_aoclk_saradc_div.hw,
405 [CLKID_AO_SAR_ADC_CLK] = &g12a_aoclk_saradc_gate.hw,
406 [CLKID_AO_CTS_OSCIN] = &g12a_aoclk_cts_oscin.hw,
407 [CLKID_AO_32K_PRE] = &g12a_aoclk_32k_by_oscin_pre.hw,
408 [CLKID_AO_32K_DIV] = &g12a_aoclk_32k_by_oscin_div.hw,
409 [CLKID_AO_32K_SEL] = &g12a_aoclk_32k_by_oscin_sel.hw,
410 [CLKID_AO_32K] = &g12a_aoclk_32k_by_oscin.hw,
411 [CLKID_AO_CEC_PRE] = &g12a_aoclk_cec_pre.hw,
412 [CLKID_AO_CEC_DIV] = &g12a_aoclk_cec_div.hw,
413 [CLKID_AO_CEC_SEL] = &g12a_aoclk_cec_sel.hw,
414 [CLKID_AO_CEC] = &g12a_aoclk_cec.hw,
415 [CLKID_AO_CTS_RTC_OSCIN] = &g12a_aoclk_cts_rtc_oscin.hw,
416 },
417 .num = NR_CLKS,
418};
419
420static const struct meson_aoclk_input g12a_aoclk_inputs[] = {
421 { .name = "xtal", .required = true },
422 { .name = "mpeg-clk", .required = true },
423 { .name = "ext-32k-0", .required = false },
424};
425
426static const struct meson_aoclk_data g12a_aoclkc_data = {
427 .reset_reg = AO_RTI_GEN_CNTL_REG0,
428 .num_reset = ARRAY_SIZE(g12a_aoclk_reset),
429 .reset = g12a_aoclk_reset,
430 .num_clks = ARRAY_SIZE(g12a_aoclk_regmap),
431 .clks = g12a_aoclk_regmap,
432 .hw_data = &g12a_aoclk_onecell_data,
433 .inputs = g12a_aoclk_inputs,
434 .num_inputs = ARRAY_SIZE(g12a_aoclk_inputs),
435 .input_prefix = IN_PREFIX,
436};
437
438static const struct of_device_id g12a_aoclkc_match_table[] = {
439 {
440 .compatible = "amlogic,meson-g12a-aoclkc",
441 .data = &g12a_aoclkc_data,
442 },
443 { }
444};
445
446static struct platform_driver g12a_aoclkc_driver = {
447 .probe = meson_aoclkc_probe,
448 .driver = {
449 .name = "g12a-aoclkc",
450 .of_match_table = g12a_aoclkc_match_table,
451 },
452};
453
454builtin_platform_driver(g12a_aoclkc_driver);
diff --git a/drivers/clk/meson/g12a-aoclk.h b/drivers/clk/meson/g12a-aoclk.h
new file mode 100644
index 000000000000..04b0d5506641
--- /dev/null
+++ b/drivers/clk/meson/g12a-aoclk.h
@@ -0,0 +1,34 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
2/*
3 * Copyright (c) 2019 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 */
6
7#ifndef __G12A_AOCLKC_H
8#define __G12A_AOCLKC_H
9
10/*
11 * CLKID index values
12 *
13 * These indices are entirely contrived and do not map onto the hardware.
14 * It has now been decided to expose everything by default in the DT header:
15 * include/dt-bindings/clock/g12a-aoclkc.h. Only the clock IDs we don't want
16 * to expose, such as the internal muxes and dividers of composite clocks,
17 * will remain defined here.
18 */
19#define CLKID_AO_SAR_ADC_SEL 16
20#define CLKID_AO_SAR_ADC_DIV 17
21#define CLKID_AO_CTS_OSCIN 19
22#define CLKID_AO_32K_PRE 20
23#define CLKID_AO_32K_DIV 21
24#define CLKID_AO_32K_SEL 22
25#define CLKID_AO_CEC_PRE 24
26#define CLKID_AO_CEC_DIV 25
27#define CLKID_AO_CEC_SEL 26
28
29#define NR_CLKS 29
30
31#include <dt-bindings/clock/g12a-aoclkc.h>
32#include <dt-bindings/reset/g12a-aoclkc.h>
33
34#endif /* __G12A_AOCLKC_H */
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
new file mode 100644
index 000000000000..0e1ce8c03259
--- /dev/null
+++ b/drivers/clk/meson/g12a.c
@@ -0,0 +1,2359 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Amlogic Meson-G12A Clock Controller Driver
4 *
5 * Copyright (c) 2016 Baylibre SAS.
6 * Author: Michael Turquette <mturquette@baylibre.com>
7 *
8 * Copyright (c) 2018 Amlogic, inc.
9 * Author: Qiufang Dai <qiufang.dai@amlogic.com>
10 * Author: Jian Hu <jian.hu@amlogic.com>
11 */
12
13#include <linux/clk-provider.h>
14#include <linux/init.h>
15#include <linux/of_device.h>
16#include <linux/platform_device.h>
17
18#include "clk-input.h"
19#include "clk-mpll.h"
20#include "clk-pll.h"
21#include "clk-regmap.h"
22#include "vid-pll-div.h"
23#include "meson-eeclk.h"
24#include "g12a.h"
25
26static DEFINE_SPINLOCK(meson_clk_lock);
27
28static struct clk_regmap g12a_fixed_pll_dco = {
29 .data = &(struct meson_clk_pll_data){
30 .en = {
31 .reg_off = HHI_FIX_PLL_CNTL0,
32 .shift = 28,
33 .width = 1,
34 },
35 .m = {
36 .reg_off = HHI_FIX_PLL_CNTL0,
37 .shift = 0,
38 .width = 8,
39 },
40 .n = {
41 .reg_off = HHI_FIX_PLL_CNTL0,
42 .shift = 10,
43 .width = 5,
44 },
45 .frac = {
46 .reg_off = HHI_FIX_PLL_CNTL1,
47 .shift = 0,
48 .width = 17,
49 },
50 .l = {
51 .reg_off = HHI_FIX_PLL_CNTL0,
52 .shift = 31,
53 .width = 1,
54 },
55 .rst = {
56 .reg_off = HHI_FIX_PLL_CNTL0,
57 .shift = 29,
58 .width = 1,
59 },
60 },
61 .hw.init = &(struct clk_init_data){
62 .name = "fixed_pll_dco",
63 .ops = &meson_clk_pll_ro_ops,
64 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
65 .num_parents = 1,
66 },
67};
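/*
 * Rough sketch of the read-only PLL rate calculation (assuming the usual
 * Meson PLL formula): rate = xtal * (m + frac / 2^frac_width) / n. For
 * example, m = 250, n = 3 (illustrative values, not necessarily the
 * bootloader defaults) would put the DCO at 24MHz * 250 / 3 = 2GHz before
 * the power-of-two OD post-divider below.
 */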
68
69static struct clk_regmap g12a_fixed_pll = {
70 .data = &(struct clk_regmap_div_data){
71 .offset = HHI_FIX_PLL_CNTL0,
72 .shift = 16,
73 .width = 2,
74 .flags = CLK_DIVIDER_POWER_OF_TWO,
75 },
76 .hw.init = &(struct clk_init_data){
77 .name = "fixed_pll",
78 .ops = &clk_regmap_divider_ro_ops,
79 .parent_names = (const char *[]){ "fixed_pll_dco" },
80 .num_parents = 1,
81 /*
82 * This clock won't ever change at runtime so
83 * CLK_SET_RATE_PARENT is not required
84 */
85 },
86};
87
88/*
89 * Internal sys pll emulation configuration parameters
90 */
91static const struct reg_sequence g12a_sys_init_regs[] = {
92 { .reg = HHI_SYS_PLL_CNTL1, .def = 0x00000000 },
93 { .reg = HHI_SYS_PLL_CNTL2, .def = 0x00000000 },
94 { .reg = HHI_SYS_PLL_CNTL3, .def = 0x48681c00 },
95 { .reg = HHI_SYS_PLL_CNTL4, .def = 0x88770290 },
96 { .reg = HHI_SYS_PLL_CNTL5, .def = 0x39272000 },
97 { .reg = HHI_SYS_PLL_CNTL6, .def = 0x56540000 },
98};
99
100static struct clk_regmap g12a_sys_pll_dco = {
101 .data = &(struct meson_clk_pll_data){
102 .en = {
103 .reg_off = HHI_SYS_PLL_CNTL0,
104 .shift = 28,
105 .width = 1,
106 },
107 .m = {
108 .reg_off = HHI_SYS_PLL_CNTL0,
109 .shift = 0,
110 .width = 8,
111 },
112 .n = {
113 .reg_off = HHI_SYS_PLL_CNTL0,
114 .shift = 10,
115 .width = 5,
116 },
117 .l = {
118 .reg_off = HHI_SYS_PLL_CNTL0,
119 .shift = 31,
120 .width = 1,
121 },
122 .rst = {
123 .reg_off = HHI_SYS_PLL_CNTL0,
124 .shift = 29,
125 .width = 1,
126 },
127 .init_regs = g12a_sys_init_regs,
128 .init_count = ARRAY_SIZE(g12a_sys_init_regs),
129 },
130 .hw.init = &(struct clk_init_data){
131 .name = "sys_pll_dco",
132 .ops = &meson_clk_pll_ro_ops,
133 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
134 .num_parents = 1,
135 },
136};
137
138static struct clk_regmap g12a_sys_pll = {
139 .data = &(struct clk_regmap_div_data){
140 .offset = HHI_SYS_PLL_CNTL0,
141 .shift = 16,
142 .width = 3,
143 .flags = CLK_DIVIDER_POWER_OF_TWO,
144 },
145 .hw.init = &(struct clk_init_data){
146 .name = "sys_pll",
147 .ops = &clk_regmap_divider_ro_ops,
148 .parent_names = (const char *[]){ "sys_pll_dco" },
149 .num_parents = 1,
150 },
151};
152
153static const struct pll_mult_range g12a_gp0_pll_mult_range = {
154 .min = 55,
155 .max = 255,
156};
157
158/*
159 * Internal gp0 pll emulation configuration parameters
160 */
161static const struct reg_sequence g12a_gp0_init_regs[] = {
162 { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
163 { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
164 { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
165 { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
166 { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
167 { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
168};
169
170static struct clk_regmap g12a_gp0_pll_dco = {
171 .data = &(struct meson_clk_pll_data){
172 .en = {
173 .reg_off = HHI_GP0_PLL_CNTL0,
174 .shift = 28,
175 .width = 1,
176 },
177 .m = {
178 .reg_off = HHI_GP0_PLL_CNTL0,
179 .shift = 0,
180 .width = 8,
181 },
182 .n = {
183 .reg_off = HHI_GP0_PLL_CNTL0,
184 .shift = 10,
185 .width = 5,
186 },
187 .frac = {
188 .reg_off = HHI_GP0_PLL_CNTL1,
189 .shift = 0,
190 .width = 17,
191 },
192 .l = {
193 .reg_off = HHI_GP0_PLL_CNTL0,
194 .shift = 31,
195 .width = 1,
196 },
197 .rst = {
198 .reg_off = HHI_GP0_PLL_CNTL0,
199 .shift = 29,
200 .width = 1,
201 },
202 .range = &g12a_gp0_pll_mult_range,
203 .init_regs = g12a_gp0_init_regs,
204 .init_count = ARRAY_SIZE(g12a_gp0_init_regs),
205 },
206 .hw.init = &(struct clk_init_data){
207 .name = "gp0_pll_dco",
208 .ops = &meson_clk_pll_ops,
209 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
210 .num_parents = 1,
211 },
212};
213
214static struct clk_regmap g12a_gp0_pll = {
215 .data = &(struct clk_regmap_div_data){
216 .offset = HHI_GP0_PLL_CNTL0,
217 .shift = 16,
218 .width = 3,
219 .flags = (CLK_DIVIDER_POWER_OF_TWO |
220 CLK_DIVIDER_ROUND_CLOSEST),
221 },
222 .hw.init = &(struct clk_init_data){
223 .name = "gp0_pll",
224 .ops = &clk_regmap_divider_ops,
225 .parent_names = (const char *[]){ "gp0_pll_dco" },
226 .num_parents = 1,
227 .flags = CLK_SET_RATE_PARENT,
228 },
229};
230
231/*
232 * Internal hifi pll emulation configuration parameters
233 */
234static const struct reg_sequence g12a_hifi_init_regs[] = {
235 { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
236 { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
237 { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
238 { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
239 { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
240 { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
241};
242
243static struct clk_regmap g12a_hifi_pll_dco = {
244 .data = &(struct meson_clk_pll_data){
245 .en = {
246 .reg_off = HHI_HIFI_PLL_CNTL0,
247 .shift = 28,
248 .width = 1,
249 },
250 .m = {
251 .reg_off = HHI_HIFI_PLL_CNTL0,
252 .shift = 0,
253 .width = 8,
254 },
255 .n = {
256 .reg_off = HHI_HIFI_PLL_CNTL0,
257 .shift = 10,
258 .width = 5,
259 },
260 .frac = {
261 .reg_off = HHI_HIFI_PLL_CNTL1,
262 .shift = 0,
263 .width = 17,
264 },
265 .l = {
266 .reg_off = HHI_HIFI_PLL_CNTL0,
267 .shift = 31,
268 .width = 1,
269 },
270 .rst = {
271 .reg_off = HHI_HIFI_PLL_CNTL0,
272 .shift = 29,
273 .width = 1,
274 },
275 .range = &g12a_gp0_pll_mult_range,
276 .init_regs = g12a_hifi_init_regs,
277 .init_count = ARRAY_SIZE(g12a_hifi_init_regs),
278 .flags = CLK_MESON_PLL_ROUND_CLOSEST,
279 },
280 .hw.init = &(struct clk_init_data){
281 .name = "hifi_pll_dco",
282 .ops = &meson_clk_pll_ops,
283 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
284 .num_parents = 1,
285 },
286};
287
288static struct clk_regmap g12a_hifi_pll = {
289 .data = &(struct clk_regmap_div_data){
290 .offset = HHI_HIFI_PLL_CNTL0,
291 .shift = 16,
292 .width = 2,
293 .flags = (CLK_DIVIDER_POWER_OF_TWO |
294 CLK_DIVIDER_ROUND_CLOSEST),
295 },
296 .hw.init = &(struct clk_init_data){
297 .name = "hifi_pll",
298 .ops = &clk_regmap_divider_ops,
299 .parent_names = (const char *[]){ "hifi_pll_dco" },
300 .num_parents = 1,
301 .flags = CLK_SET_RATE_PARENT,
302 },
303};
304
305static struct clk_regmap g12a_hdmi_pll_dco = {
306 .data = &(struct meson_clk_pll_data){
307 .en = {
308 .reg_off = HHI_HDMI_PLL_CNTL0,
309 .shift = 28,
310 .width = 1,
311 },
312 .m = {
313 .reg_off = HHI_HDMI_PLL_CNTL0,
314 .shift = 0,
315 .width = 8,
316 },
317 .n = {
318 .reg_off = HHI_HDMI_PLL_CNTL0,
319 .shift = 10,
320 .width = 5,
321 },
322 .frac = {
323 .reg_off = HHI_HDMI_PLL_CNTL1,
324 .shift = 0,
325 .width = 16,
326 },
327 .l = {
328 .reg_off = HHI_HDMI_PLL_CNTL0,
329 .shift = 30,
330 .width = 1,
331 },
332 .rst = {
333 .reg_off = HHI_HDMI_PLL_CNTL0,
334 .shift = 29,
335 .width = 1,
336 },
337 },
338 .hw.init = &(struct clk_init_data){
339 .name = "hdmi_pll_dco",
340 .ops = &meson_clk_pll_ro_ops,
341 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
342 .num_parents = 1,
343 /*
344 * The display driver directly handles the HDMI PLL registers at the moment,
345 * so we need NOCACHE to keep our view of the clock as accurate as possible
346 */
347 .flags = CLK_GET_RATE_NOCACHE,
348 },
349};
350
351static struct clk_regmap g12a_hdmi_pll_od = {
352 .data = &(struct clk_regmap_div_data){
353 .offset = HHI_HDMI_PLL_CNTL0,
354 .shift = 16,
355 .width = 2,
356 .flags = CLK_DIVIDER_POWER_OF_TWO,
357 },
358 .hw.init = &(struct clk_init_data){
359 .name = "hdmi_pll_od",
360 .ops = &clk_regmap_divider_ro_ops,
361 .parent_names = (const char *[]){ "hdmi_pll_dco" },
362 .num_parents = 1,
363 .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
364 },
365};
366
367static struct clk_regmap g12a_hdmi_pll_od2 = {
368 .data = &(struct clk_regmap_div_data){
369 .offset = HHI_HDMI_PLL_CNTL0,
370 .shift = 18,
371 .width = 2,
372 .flags = CLK_DIVIDER_POWER_OF_TWO,
373 },
374 .hw.init = &(struct clk_init_data){
375 .name = "hdmi_pll_od2",
376 .ops = &clk_regmap_divider_ro_ops,
377 .parent_names = (const char *[]){ "hdmi_pll_od" },
378 .num_parents = 1,
379 .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
380 },
381};
382
383static struct clk_regmap g12a_hdmi_pll = {
384 .data = &(struct clk_regmap_div_data){
385 .offset = HHI_HDMI_PLL_CNTL0,
386 .shift = 20,
387 .width = 2,
388 .flags = CLK_DIVIDER_POWER_OF_TWO,
389 },
390 .hw.init = &(struct clk_init_data){
391 .name = "hdmi_pll",
392 .ops = &clk_regmap_divider_ro_ops,
393 .parent_names = (const char *[]){ "hdmi_pll_od2" },
394 .num_parents = 1,
395 .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
396 },
397};
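/*
 * The HDMI PLL rate is the DCO rate divided by three cascaded power-of-two
 * post-dividers (hdmi_pll_od, hdmi_pll_od2 and hdmi_pll above), each 2 bits
 * wide. All of them are registered read-only here since, as noted above,
 * the display driver currently programs these registers directly.
 */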
398
399static struct clk_fixed_factor g12a_fclk_div2_div = {
400 .mult = 1,
401 .div = 2,
402 .hw.init = &(struct clk_init_data){
403 .name = "fclk_div2_div",
404 .ops = &clk_fixed_factor_ops,
405 .parent_names = (const char *[]){ "fixed_pll" },
406 .num_parents = 1,
407 },
408};
409
410static struct clk_regmap g12a_fclk_div2 = {
411 .data = &(struct clk_regmap_gate_data){
412 .offset = HHI_FIX_PLL_CNTL1,
413 .bit_idx = 24,
414 },
415 .hw.init = &(struct clk_init_data){
416 .name = "fclk_div2",
417 .ops = &clk_regmap_gate_ops,
418 .parent_names = (const char *[]){ "fclk_div2_div" },
419 .num_parents = 1,
420 },
421};
422
423static struct clk_fixed_factor g12a_fclk_div3_div = {
424 .mult = 1,
425 .div = 3,
426 .hw.init = &(struct clk_init_data){
427 .name = "fclk_div3_div",
428 .ops = &clk_fixed_factor_ops,
429 .parent_names = (const char *[]){ "fixed_pll" },
430 .num_parents = 1,
431 },
432};
433
434static struct clk_regmap g12a_fclk_div3 = {
435 .data = &(struct clk_regmap_gate_data){
436 .offset = HHI_FIX_PLL_CNTL1,
437 .bit_idx = 20,
438 },
439 .hw.init = &(struct clk_init_data){
440 .name = "fclk_div3",
441 .ops = &clk_regmap_gate_ops,
442 .parent_names = (const char *[]){ "fclk_div3_div" },
443 .num_parents = 1,
444 },
445};
446
447static struct clk_fixed_factor g12a_fclk_div4_div = {
448 .mult = 1,
449 .div = 4,
450 .hw.init = &(struct clk_init_data){
451 .name = "fclk_div4_div",
452 .ops = &clk_fixed_factor_ops,
453 .parent_names = (const char *[]){ "fixed_pll" },
454 .num_parents = 1,
455 },
456};
457
458static struct clk_regmap g12a_fclk_div4 = {
459 .data = &(struct clk_regmap_gate_data){
460 .offset = HHI_FIX_PLL_CNTL1,
461 .bit_idx = 21,
462 },
463 .hw.init = &(struct clk_init_data){
464 .name = "fclk_div4",
465 .ops = &clk_regmap_gate_ops,
466 .parent_names = (const char *[]){ "fclk_div4_div" },
467 .num_parents = 1,
468 },
469};
470
471static struct clk_fixed_factor g12a_fclk_div5_div = {
472 .mult = 1,
473 .div = 5,
474 .hw.init = &(struct clk_init_data){
475 .name = "fclk_div5_div",
476 .ops = &clk_fixed_factor_ops,
477 .parent_names = (const char *[]){ "fixed_pll" },
478 .num_parents = 1,
479 },
480};
481
482static struct clk_regmap g12a_fclk_div5 = {
483 .data = &(struct clk_regmap_gate_data){
484 .offset = HHI_FIX_PLL_CNTL1,
485 .bit_idx = 22,
486 },
487 .hw.init = &(struct clk_init_data){
488 .name = "fclk_div5",
489 .ops = &clk_regmap_gate_ops,
490 .parent_names = (const char *[]){ "fclk_div5_div" },
491 .num_parents = 1,
492 },
493};
494
495static struct clk_fixed_factor g12a_fclk_div7_div = {
496 .mult = 1,
497 .div = 7,
498 .hw.init = &(struct clk_init_data){
499 .name = "fclk_div7_div",
500 .ops = &clk_fixed_factor_ops,
501 .parent_names = (const char *[]){ "fixed_pll" },
502 .num_parents = 1,
503 },
504};
505
506static struct clk_regmap g12a_fclk_div7 = {
507 .data = &(struct clk_regmap_gate_data){
508 .offset = HHI_FIX_PLL_CNTL1,
509 .bit_idx = 23,
510 },
511 .hw.init = &(struct clk_init_data){
512 .name = "fclk_div7",
513 .ops = &clk_regmap_gate_ops,
514 .parent_names = (const char *[]){ "fclk_div7_div" },
515 .num_parents = 1,
516 },
517};
518
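/*
 * Unlike the other fclk dividers, div2p5 divides fixed_pll_dco rather than
 * fixed_pll. Assuming the bootloader leaves the fixed_pll OD divider at /2,
 * dividing the DCO by 5 is equivalent to dividing fixed_pll by 2.5, hence
 * the name.
 */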
519static struct clk_fixed_factor g12a_fclk_div2p5_div = {
520 .mult = 1,
521 .div = 5,
522 .hw.init = &(struct clk_init_data){
523 .name = "fclk_div2p5_div",
524 .ops = &clk_fixed_factor_ops,
525 .parent_names = (const char *[]){ "fixed_pll_dco" },
526 .num_parents = 1,
527 },
528};
529
530static struct clk_regmap g12a_fclk_div2p5 = {
531 .data = &(struct clk_regmap_gate_data){
532 .offset = HHI_FIX_PLL_CNTL1,
533 .bit_idx = 25,
534 },
535 .hw.init = &(struct clk_init_data){
536 .name = "fclk_div2p5",
537 .ops = &clk_regmap_gate_ops,
538 .parent_names = (const char *[]){ "fclk_div2p5_div" },
539 .num_parents = 1,
540 },
541};
542
543static struct clk_fixed_factor g12a_mpll_50m_div = {
544 .mult = 1,
545 .div = 80,
546 .hw.init = &(struct clk_init_data){
547 .name = "mpll_50m_div",
548 .ops = &clk_fixed_factor_ops,
549 .parent_names = (const char *[]){ "fixed_pll_dco" },
550 .num_parents = 1,
551 },
552};
553
554static struct clk_regmap g12a_mpll_50m = {
555 .data = &(struct clk_regmap_mux_data){
556 .offset = HHI_FIX_PLL_CNTL3,
557 .mask = 0x1,
558 .shift = 5,
559 },
560 .hw.init = &(struct clk_init_data){
561 .name = "mpll_50m",
562 .ops = &clk_regmap_mux_ro_ops,
563 .parent_names = (const char *[]){ IN_PREFIX "xtal",
564 "mpll_50m_div" },
565 .num_parents = 2,
566 },
567};
568
569static struct clk_fixed_factor g12a_mpll_prediv = {
570 .mult = 1,
571 .div = 2,
572 .hw.init = &(struct clk_init_data){
573 .name = "mpll_prediv",
574 .ops = &clk_fixed_factor_ops,
575 .parent_names = (const char *[]){ "fixed_pll_dco" },
576 .num_parents = 1,
577 },
578};
579
580static struct clk_regmap g12a_mpll0_div = {
581 .data = &(struct meson_clk_mpll_data){
582 .sdm = {
583 .reg_off = HHI_MPLL_CNTL1,
584 .shift = 0,
585 .width = 14,
586 },
587 .sdm_en = {
588 .reg_off = HHI_MPLL_CNTL1,
589 .shift = 30,
590 .width = 1,
591 },
592 .n2 = {
593 .reg_off = HHI_MPLL_CNTL1,
594 .shift = 20,
595 .width = 9,
596 },
597 .ssen = {
598 .reg_off = HHI_MPLL_CNTL1,
599 .shift = 29,
600 .width = 1,
601 },
602 .lock = &meson_clk_lock,
603 },
604 .hw.init = &(struct clk_init_data){
605 .name = "mpll0_div",
606 .ops = &meson_clk_mpll_ops,
607 .parent_names = (const char *[]){ "mpll_prediv" },
608 .num_parents = 1,
609 },
610};
611
612static struct clk_regmap g12a_mpll0 = {
613 .data = &(struct clk_regmap_gate_data){
614 .offset = HHI_MPLL_CNTL1,
615 .bit_idx = 31,
616 },
617 .hw.init = &(struct clk_init_data){
618 .name = "mpll0",
619 .ops = &clk_regmap_gate_ops,
620 .parent_names = (const char *[]){ "mpll0_div" },
621 .num_parents = 1,
622 .flags = CLK_SET_RATE_PARENT,
623 },
624};
625
626static struct clk_regmap g12a_mpll1_div = {
627 .data = &(struct meson_clk_mpll_data){
628 .sdm = {
629 .reg_off = HHI_MPLL_CNTL3,
630 .shift = 0,
631 .width = 14,
632 },
633 .sdm_en = {
634 .reg_off = HHI_MPLL_CNTL3,
635 .shift = 30,
636 .width = 1,
637 },
638 .n2 = {
639 .reg_off = HHI_MPLL_CNTL3,
640 .shift = 20,
641 .width = 9,
642 },
643 .ssen = {
644 .reg_off = HHI_MPLL_CNTL3,
645 .shift = 29,
646 .width = 1,
647 },
648 .lock = &meson_clk_lock,
649 },
650 .hw.init = &(struct clk_init_data){
651 .name = "mpll1_div",
652 .ops = &meson_clk_mpll_ops,
653 .parent_names = (const char *[]){ "mpll_prediv" },
654 .num_parents = 1,
655 },
656};
657
658static struct clk_regmap g12a_mpll1 = {
659 .data = &(struct clk_regmap_gate_data){
660 .offset = HHI_MPLL_CNTL3,
661 .bit_idx = 31,
662 },
663 .hw.init = &(struct clk_init_data){
664 .name = "mpll1",
665 .ops = &clk_regmap_gate_ops,
666 .parent_names = (const char *[]){ "mpll1_div" },
667 .num_parents = 1,
668 .flags = CLK_SET_RATE_PARENT,
669 },
670};
671
672static struct clk_regmap g12a_mpll2_div = {
673 .data = &(struct meson_clk_mpll_data){
674 .sdm = {
675 .reg_off = HHI_MPLL_CNTL5,
676 .shift = 0,
677 .width = 14,
678 },
679 .sdm_en = {
680 .reg_off = HHI_MPLL_CNTL5,
681 .shift = 30,
682 .width = 1,
683 },
684 .n2 = {
685 .reg_off = HHI_MPLL_CNTL5,
686 .shift = 20,
687 .width = 9,
688 },
689 .ssen = {
690 .reg_off = HHI_MPLL_CNTL5,
691 .shift = 29,
692 .width = 1,
693 },
694 .lock = &meson_clk_lock,
695 },
696 .hw.init = &(struct clk_init_data){
697 .name = "mpll2_div",
698 .ops = &meson_clk_mpll_ops,
699 .parent_names = (const char *[]){ "mpll_prediv" },
700 .num_parents = 1,
701 },
702};
703
704static struct clk_regmap g12a_mpll2 = {
705 .data = &(struct clk_regmap_gate_data){
706 .offset = HHI_MPLL_CNTL5,
707 .bit_idx = 31,
708 },
709 .hw.init = &(struct clk_init_data){
710 .name = "mpll2",
711 .ops = &clk_regmap_gate_ops,
712 .parent_names = (const char *[]){ "mpll2_div" },
713 .num_parents = 1,
714 .flags = CLK_SET_RATE_PARENT,
715 },
716};
717
718static struct clk_regmap g12a_mpll3_div = {
719 .data = &(struct meson_clk_mpll_data){
720 .sdm = {
721 .reg_off = HHI_MPLL_CNTL7,
722 .shift = 0,
723 .width = 14,
724 },
725 .sdm_en = {
726 .reg_off = HHI_MPLL_CNTL7,
727 .shift = 30,
728 .width = 1,
729 },
730 .n2 = {
731 .reg_off = HHI_MPLL_CNTL7,
732 .shift = 20,
733 .width = 9,
734 },
735 .ssen = {
736 .reg_off = HHI_MPLL_CNTL7,
737 .shift = 29,
738 .width = 1,
739 },
740 .lock = &meson_clk_lock,
741 },
742 .hw.init = &(struct clk_init_data){
743 .name = "mpll3_div",
744 .ops = &meson_clk_mpll_ops,
745 .parent_names = (const char *[]){ "mpll_prediv" },
746 .num_parents = 1,
747 },
748};
749
750static struct clk_regmap g12a_mpll3 = {
751 .data = &(struct clk_regmap_gate_data){
752 .offset = HHI_MPLL_CNTL7,
753 .bit_idx = 31,
754 },
755 .hw.init = &(struct clk_init_data){
756 .name = "mpll3",
757 .ops = &clk_regmap_gate_ops,
758 .parent_names = (const char *[]){ "mpll3_div" },
759 .num_parents = 1,
760 .flags = CLK_SET_RATE_PARENT,
761 },
762};
763
764static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
765static const char * const clk81_parent_names[] = {
766 IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
767 "fclk_div3", "fclk_div5"
768};
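/*
 * mux_table_clk81 maps the seven clk81 parents above onto the register
 * values of the 3-bit mpeg_clk mux, skipping value 1 which is not used by
 * this driver.
 */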
769
770static struct clk_regmap g12a_mpeg_clk_sel = {
771 .data = &(struct clk_regmap_mux_data){
772 .offset = HHI_MPEG_CLK_CNTL,
773 .mask = 0x7,
774 .shift = 12,
775 .table = mux_table_clk81,
776 },
777 .hw.init = &(struct clk_init_data){
778 .name = "mpeg_clk_sel",
779 .ops = &clk_regmap_mux_ro_ops,
780 .parent_names = clk81_parent_names,
781 .num_parents = ARRAY_SIZE(clk81_parent_names),
782 },
783};
784
785static struct clk_regmap g12a_mpeg_clk_div = {
786 .data = &(struct clk_regmap_div_data){
787 .offset = HHI_MPEG_CLK_CNTL,
788 .shift = 0,
789 .width = 7,
790 },
791 .hw.init = &(struct clk_init_data){
792 .name = "mpeg_clk_div",
793 .ops = &clk_regmap_divider_ops,
794 .parent_names = (const char *[]){ "mpeg_clk_sel" },
795 .num_parents = 1,
796 .flags = CLK_SET_RATE_PARENT,
797 },
798};
799
800static struct clk_regmap g12a_clk81 = {
801 .data = &(struct clk_regmap_gate_data){
802 .offset = HHI_MPEG_CLK_CNTL,
803 .bit_idx = 7,
804 },
805 .hw.init = &(struct clk_init_data){
806 .name = "clk81",
807 .ops = &clk_regmap_gate_ops,
808 .parent_names = (const char *[]){ "mpeg_clk_div" },
809 .num_parents = 1,
810 .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
811 },
812};
813
814static const char * const g12a_sd_emmc_clk0_parent_names[] = {
815 IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
816
817 /*
818 * Beyond the parent clocks listed above, the hardware mux also has mpll2,
819 * mpll3 and gp0_pll, but these clocks are too precious to be used here. All
820 * the necessary rates for MMC and NAND operation can be achieved using
821 * g12a_ee_core or fclk_div clocks
822 */
823};
824
825/* SDIO clock */
826static struct clk_regmap g12a_sd_emmc_a_clk0_sel = {
827 .data = &(struct clk_regmap_mux_data){
828 .offset = HHI_SD_EMMC_CLK_CNTL,
829 .mask = 0x7,
830 .shift = 9,
831 },
832 .hw.init = &(struct clk_init_data) {
833 .name = "sd_emmc_a_clk0_sel",
834 .ops = &clk_regmap_mux_ops,
835 .parent_names = g12a_sd_emmc_clk0_parent_names,
836 .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
837 .flags = CLK_SET_RATE_PARENT,
838 },
839};
840
841static struct clk_regmap g12a_sd_emmc_a_clk0_div = {
842 .data = &(struct clk_regmap_div_data){
843 .offset = HHI_SD_EMMC_CLK_CNTL,
844 .shift = 0,
845 .width = 7,
846 },
847 .hw.init = &(struct clk_init_data) {
848 .name = "sd_emmc_a_clk0_div",
849 .ops = &clk_regmap_divider_ops,
850 .parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" },
851 .num_parents = 1,
852 .flags = CLK_SET_RATE_PARENT,
853 },
854};
855
856static struct clk_regmap g12a_sd_emmc_a_clk0 = {
857 .data = &(struct clk_regmap_gate_data){
858 .offset = HHI_SD_EMMC_CLK_CNTL,
859 .bit_idx = 7,
860 },
861 .hw.init = &(struct clk_init_data){
862 .name = "sd_emmc_a_clk0",
863 .ops = &clk_regmap_gate_ops,
864 .parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
865 .num_parents = 1,
866 .flags = CLK_SET_RATE_PARENT,
867 },
868};
869
870/* SDcard clock */
871static struct clk_regmap g12a_sd_emmc_b_clk0_sel = {
872 .data = &(struct clk_regmap_mux_data){
873 .offset = HHI_SD_EMMC_CLK_CNTL,
874 .mask = 0x7,
875 .shift = 25,
876 },
877 .hw.init = &(struct clk_init_data) {
878 .name = "sd_emmc_b_clk0_sel",
879 .ops = &clk_regmap_mux_ops,
880 .parent_names = g12a_sd_emmc_clk0_parent_names,
881 .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
882 .flags = CLK_SET_RATE_PARENT,
883 },
884};
885
886static struct clk_regmap g12a_sd_emmc_b_clk0_div = {
887 .data = &(struct clk_regmap_div_data){
888 .offset = HHI_SD_EMMC_CLK_CNTL,
889 .shift = 16,
890 .width = 7,
891 },
892 .hw.init = &(struct clk_init_data) {
893 .name = "sd_emmc_b_clk0_div",
894 .ops = &clk_regmap_divider_ops,
895 .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
896 .num_parents = 1,
897 .flags = CLK_SET_RATE_PARENT,
898 },
899};
900
901static struct clk_regmap g12a_sd_emmc_b_clk0 = {
902 .data = &(struct clk_regmap_gate_data){
903 .offset = HHI_SD_EMMC_CLK_CNTL,
904 .bit_idx = 23,
905 },
906 .hw.init = &(struct clk_init_data){
907 .name = "sd_emmc_b_clk0",
908 .ops = &clk_regmap_gate_ops,
909 .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
910 .num_parents = 1,
911 .flags = CLK_SET_RATE_PARENT,
912 },
913};
914
915/* EMMC/NAND clock */
916static struct clk_regmap g12a_sd_emmc_c_clk0_sel = {
917 .data = &(struct clk_regmap_mux_data){
918 .offset = HHI_NAND_CLK_CNTL,
919 .mask = 0x7,
920 .shift = 9,
921 },
922 .hw.init = &(struct clk_init_data) {
923 .name = "sd_emmc_c_clk0_sel",
924 .ops = &clk_regmap_mux_ops,
925 .parent_names = g12a_sd_emmc_clk0_parent_names,
926 .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
927 .flags = CLK_SET_RATE_PARENT,
928 },
929};
930
931static struct clk_regmap g12a_sd_emmc_c_clk0_div = {
932 .data = &(struct clk_regmap_div_data){
933 .offset = HHI_NAND_CLK_CNTL,
934 .shift = 0,
935 .width = 7,
936 },
937 .hw.init = &(struct clk_init_data) {
938 .name = "sd_emmc_c_clk0_div",
939 .ops = &clk_regmap_divider_ops,
940 .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
941 .num_parents = 1,
942 .flags = CLK_SET_RATE_PARENT,
943 },
944};
945
946static struct clk_regmap g12a_sd_emmc_c_clk0 = {
947 .data = &(struct clk_regmap_gate_data){
948 .offset = HHI_NAND_CLK_CNTL,
949 .bit_idx = 7,
950 },
951 .hw.init = &(struct clk_init_data){
952 .name = "sd_emmc_c_clk0",
953 .ops = &clk_regmap_gate_ops,
954 .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
955 .num_parents = 1,
956 .flags = CLK_SET_RATE_PARENT,
957 },
958};
959
960/* VPU Clock */
961
962static const char * const g12a_vpu_parent_names[] = {
963 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
964 "mpll1", "vid_pll", "hifi_pll", "gp0_pll",
965};
966
967static struct clk_regmap g12a_vpu_0_sel = {
968 .data = &(struct clk_regmap_mux_data){
969 .offset = HHI_VPU_CLK_CNTL,
970 .mask = 0x3,
971 .shift = 9,
972 },
973 .hw.init = &(struct clk_init_data){
974 .name = "vpu_0_sel",
975 .ops = &clk_regmap_mux_ops,
976 .parent_names = g12a_vpu_parent_names,
977 .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
978 .flags = CLK_SET_RATE_NO_REPARENT,
979 },
980};
981
982static struct clk_regmap g12a_vpu_0_div = {
983 .data = &(struct clk_regmap_div_data){
984 .offset = HHI_VPU_CLK_CNTL,
985 .shift = 0,
986 .width = 7,
987 },
988 .hw.init = &(struct clk_init_data){
989 .name = "vpu_0_div",
990 .ops = &clk_regmap_divider_ops,
991 .parent_names = (const char *[]){ "vpu_0_sel" },
992 .num_parents = 1,
993 .flags = CLK_SET_RATE_PARENT,
994 },
995};
996
997static struct clk_regmap g12a_vpu_0 = {
998 .data = &(struct clk_regmap_gate_data){
999 .offset = HHI_VPU_CLK_CNTL,
1000 .bit_idx = 8,
1001 },
1002 .hw.init = &(struct clk_init_data) {
1003 .name = "vpu_0",
1004 .ops = &clk_regmap_gate_ops,
1005 .parent_names = (const char *[]){ "vpu_0_div" },
1006 .num_parents = 1,
1007 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1008 },
1009};
1010
1011static struct clk_regmap g12a_vpu_1_sel = {
1012 .data = &(struct clk_regmap_mux_data){
1013 .offset = HHI_VPU_CLK_CNTL,
1014 .mask = 0x3,
1015 .shift = 25,
1016 },
1017 .hw.init = &(struct clk_init_data){
1018 .name = "vpu_1_sel",
1019 .ops = &clk_regmap_mux_ops,
1020 .parent_names = g12a_vpu_parent_names,
1021 .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
1022 .flags = CLK_SET_RATE_NO_REPARENT,
1023 },
1024};
1025
1026static struct clk_regmap g12a_vpu_1_div = {
1027 .data = &(struct clk_regmap_div_data){
1028 .offset = HHI_VPU_CLK_CNTL,
1029 .shift = 16,
1030 .width = 7,
1031 },
1032 .hw.init = &(struct clk_init_data){
1033 .name = "vpu_1_div",
1034 .ops = &clk_regmap_divider_ops,
1035 .parent_names = (const char *[]){ "vpu_1_sel" },
1036 .num_parents = 1,
1037 .flags = CLK_SET_RATE_PARENT,
1038 },
1039};
1040
1041static struct clk_regmap g12a_vpu_1 = {
1042 .data = &(struct clk_regmap_gate_data){
1043 .offset = HHI_VPU_CLK_CNTL,
1044 .bit_idx = 24,
1045 },
1046 .hw.init = &(struct clk_init_data) {
1047 .name = "vpu_1",
1048 .ops = &clk_regmap_gate_ops,
1049 .parent_names = (const char *[]){ "vpu_1_div" },
1050 .num_parents = 1,
1051 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1052 },
1053};
1054
1055static struct clk_regmap g12a_vpu = {
1056 .data = &(struct clk_regmap_mux_data){
1057 .offset = HHI_VPU_CLK_CNTL,
1058 .mask = 1,
1059 .shift = 31,
1060 },
1061 .hw.init = &(struct clk_init_data){
1062 .name = "vpu",
1063 .ops = &clk_regmap_mux_ops,
1064 /*
1065 * bit 31 selects from 2 possible parents:
1066 * vpu_0 or vpu_1
1067 */
1068 .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
1069 .num_parents = 2,
1070 .flags = CLK_SET_RATE_NO_REPARENT,
1071 },
1072};
1073
1074/* VAPB Clock */
1075
1076static const char * const g12a_vapb_parent_names[] = {
1077 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
1078 "mpll1", "vid_pll", "mpll2", "fclk_div2p5",
1079};
1080
1081static struct clk_regmap g12a_vapb_0_sel = {
1082 .data = &(struct clk_regmap_mux_data){
1083 .offset = HHI_VAPBCLK_CNTL,
1084 .mask = 0x3,
1085 .shift = 9,
1086 },
1087 .hw.init = &(struct clk_init_data){
1088 .name = "vapb_0_sel",
1089 .ops = &clk_regmap_mux_ops,
1090 .parent_names = g12a_vapb_parent_names,
1091 .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
1092 .flags = CLK_SET_RATE_NO_REPARENT,
1093 },
1094};
1095
1096static struct clk_regmap g12a_vapb_0_div = {
1097 .data = &(struct clk_regmap_div_data){
1098 .offset = HHI_VAPBCLK_CNTL,
1099 .shift = 0,
1100 .width = 7,
1101 },
1102 .hw.init = &(struct clk_init_data){
1103 .name = "vapb_0_div",
1104 .ops = &clk_regmap_divider_ops,
1105 .parent_names = (const char *[]){ "vapb_0_sel" },
1106 .num_parents = 1,
1107 .flags = CLK_SET_RATE_PARENT,
1108 },
1109};
1110
1111static struct clk_regmap g12a_vapb_0 = {
1112 .data = &(struct clk_regmap_gate_data){
1113 .offset = HHI_VAPBCLK_CNTL,
1114 .bit_idx = 8,
1115 },
1116 .hw.init = &(struct clk_init_data) {
1117 .name = "vapb_0",
1118 .ops = &clk_regmap_gate_ops,
1119 .parent_names = (const char *[]){ "vapb_0_div" },
1120 .num_parents = 1,
1121 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1122 },
1123};
1124
1125static struct clk_regmap g12a_vapb_1_sel = {
1126 .data = &(struct clk_regmap_mux_data){
1127 .offset = HHI_VAPBCLK_CNTL,
1128 .mask = 0x3,
1129 .shift = 25,
1130 },
1131 .hw.init = &(struct clk_init_data){
1132 .name = "vapb_1_sel",
1133 .ops = &clk_regmap_mux_ops,
1134 .parent_names = g12a_vapb_parent_names,
1135 .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
1136 .flags = CLK_SET_RATE_NO_REPARENT,
1137 },
1138};
1139
1140static struct clk_regmap g12a_vapb_1_div = {
1141 .data = &(struct clk_regmap_div_data){
1142 .offset = HHI_VAPBCLK_CNTL,
1143 .shift = 16,
1144 .width = 7,
1145 },
1146 .hw.init = &(struct clk_init_data){
1147 .name = "vapb_1_div",
1148 .ops = &clk_regmap_divider_ops,
1149 .parent_names = (const char *[]){ "vapb_1_sel" },
1150 .num_parents = 1,
1151 .flags = CLK_SET_RATE_PARENT,
1152 },
1153};
1154
1155static struct clk_regmap g12a_vapb_1 = {
1156 .data = &(struct clk_regmap_gate_data){
1157 .offset = HHI_VAPBCLK_CNTL,
1158 .bit_idx = 24,
1159 },
1160 .hw.init = &(struct clk_init_data) {
1161 .name = "vapb_1",
1162 .ops = &clk_regmap_gate_ops,
1163 .parent_names = (const char *[]){ "vapb_1_div" },
1164 .num_parents = 1,
1165 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1166 },
1167};
1168
1169static struct clk_regmap g12a_vapb_sel = {
1170 .data = &(struct clk_regmap_mux_data){
1171 .offset = HHI_VAPBCLK_CNTL,
1172 .mask = 1,
1173 .shift = 31,
1174 },
1175 .hw.init = &(struct clk_init_data){
1176 .name = "vapb_sel",
1177 .ops = &clk_regmap_mux_ops,
1178 /*
1179 * bit 31 selects from 2 possible parents:
1180 * vapb_0 or vapb_1
1181 */
1182 .parent_names = (const char *[]){ "vapb_0", "vapb_1" },
1183 .num_parents = 2,
1184 .flags = CLK_SET_RATE_NO_REPARENT,
1185 },
1186};
1187
1188static struct clk_regmap g12a_vapb = {
1189 .data = &(struct clk_regmap_gate_data){
1190 .offset = HHI_VAPBCLK_CNTL,
1191 .bit_idx = 30,
1192 },
1193 .hw.init = &(struct clk_init_data) {
1194 .name = "vapb",
1195 .ops = &clk_regmap_gate_ops,
1196 .parent_names = (const char *[]){ "vapb_sel" },
1197 .num_parents = 1,
1198 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1199 },
1200};
1201
1202/* Video Clocks */
1203
1204static struct clk_regmap g12a_vid_pll_div = {
1205 .data = &(struct meson_vid_pll_div_data){
1206 .val = {
1207 .reg_off = HHI_VID_PLL_CLK_DIV,
1208 .shift = 0,
1209 .width = 15,
1210 },
1211 .sel = {
1212 .reg_off = HHI_VID_PLL_CLK_DIV,
1213 .shift = 16,
1214 .width = 2,
1215 },
1216 },
1217 .hw.init = &(struct clk_init_data) {
1218 .name = "vid_pll_div",
1219 .ops = &meson_vid_pll_div_ro_ops,
1220 .parent_names = (const char *[]){ "hdmi_pll" },
1221 .num_parents = 1,
1222 .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
1223 },
1224};
1225
1226static const char * const g12a_vid_pll_parent_names[] = { "vid_pll_div",
1227 "hdmi_pll" };
1228
1229static struct clk_regmap g12a_vid_pll_sel = {
1230 .data = &(struct clk_regmap_mux_data){
1231 .offset = HHI_VID_PLL_CLK_DIV,
1232 .mask = 0x1,
1233 .shift = 18,
1234 },
1235 .hw.init = &(struct clk_init_data){
1236 .name = "vid_pll_sel",
1237 .ops = &clk_regmap_mux_ops,
1238 /*
1239 * bit 18 selects from 2 possible parents:
1240 * vid_pll_div or hdmi_pll
1241 */
1242 .parent_names = g12a_vid_pll_parent_names,
1243 .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_names),
1244 .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
1245 },
1246};
1247
1248static struct clk_regmap g12a_vid_pll = {
1249 .data = &(struct clk_regmap_gate_data){
1250 .offset = HHI_VID_PLL_CLK_DIV,
1251 .bit_idx = 19,
1252 },
1253 .hw.init = &(struct clk_init_data) {
1254 .name = "vid_pll",
1255 .ops = &clk_regmap_gate_ops,
1256 .parent_names = (const char *[]){ "vid_pll_sel" },
1257 .num_parents = 1,
1258 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1259 },
1260};
1261
1262static const char * const g12a_vclk_parent_names[] = {
1263 "vid_pll", "gp0_pll", "hifi_pll", "mpll1", "fclk_div3", "fclk_div4",
1264 "fclk_div5", "fclk_div7"
1265};
1266
1267static struct clk_regmap g12a_vclk_sel = {
1268 .data = &(struct clk_regmap_mux_data){
1269 .offset = HHI_VID_CLK_CNTL,
1270 .mask = 0x7,
1271 .shift = 16,
1272 },
1273 .hw.init = &(struct clk_init_data){
1274 .name = "vclk_sel",
1275 .ops = &clk_regmap_mux_ops,
1276 .parent_names = g12a_vclk_parent_names,
1277 .num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
1278 .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
1279 },
1280};
1281
1282static struct clk_regmap g12a_vclk2_sel = {
1283 .data = &(struct clk_regmap_mux_data){
1284 .offset = HHI_VIID_CLK_CNTL,
1285 .mask = 0x7,
1286 .shift = 16,
1287 },
1288 .hw.init = &(struct clk_init_data){
1289 .name = "vclk2_sel",
1290 .ops = &clk_regmap_mux_ops,
1291 .parent_names = g12a_vclk_parent_names,
1292 .num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
1293 .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
1294 },
1295};
1296
1297static struct clk_regmap g12a_vclk_input = {
1298 .data = &(struct clk_regmap_gate_data){
1299 .offset = HHI_VID_CLK_DIV,
1300 .bit_idx = 16,
1301 },
1302 .hw.init = &(struct clk_init_data) {
1303 .name = "vclk_input",
1304 .ops = &clk_regmap_gate_ops,
1305 .parent_names = (const char *[]){ "vclk_sel" },
1306 .num_parents = 1,
1307 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1308 },
1309};
1310
1311static struct clk_regmap g12a_vclk2_input = {
1312 .data = &(struct clk_regmap_gate_data){
1313 .offset = HHI_VIID_CLK_DIV,
1314 .bit_idx = 16,
1315 },
1316 .hw.init = &(struct clk_init_data) {
1317 .name = "vclk2_input",
1318 .ops = &clk_regmap_gate_ops,
1319 .parent_names = (const char *[]){ "vclk2_sel" },
1320 .num_parents = 1,
1321 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1322 },
1323};
1324
1325static struct clk_regmap g12a_vclk_div = {
1326 .data = &(struct clk_regmap_div_data){
1327 .offset = HHI_VID_CLK_DIV,
1328 .shift = 0,
1329 .width = 8,
1330 },
1331 .hw.init = &(struct clk_init_data){
1332 .name = "vclk_div",
1333 .ops = &clk_regmap_divider_ops,
1334 .parent_names = (const char *[]){ "vclk_input" },
1335 .num_parents = 1,
1336 .flags = CLK_GET_RATE_NOCACHE,
1337 },
1338};
1339
1340static struct clk_regmap g12a_vclk2_div = {
1341 .data = &(struct clk_regmap_div_data){
1342 .offset = HHI_VIID_CLK_DIV,
1343 .shift = 0,
1344 .width = 8,
1345 },
1346 .hw.init = &(struct clk_init_data){
1347 .name = "vclk2_div",
1348 .ops = &clk_regmap_divider_ops,
1349 .parent_names = (const char *[]){ "vclk2_input" },
1350 .num_parents = 1,
1351 .flags = CLK_GET_RATE_NOCACHE,
1352 },
1353};
1354
1355static struct clk_regmap g12a_vclk = {
1356 .data = &(struct clk_regmap_gate_data){
1357 .offset = HHI_VID_CLK_CNTL,
1358 .bit_idx = 19,
1359 },
1360 .hw.init = &(struct clk_init_data) {
1361 .name = "vclk",
1362 .ops = &clk_regmap_gate_ops,
1363 .parent_names = (const char *[]){ "vclk_div" },
1364 .num_parents = 1,
1365 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1366 },
1367};
1368
1369static struct clk_regmap g12a_vclk2 = {
1370 .data = &(struct clk_regmap_gate_data){
1371 .offset = HHI_VIID_CLK_CNTL,
1372 .bit_idx = 19,
1373 },
1374 .hw.init = &(struct clk_init_data) {
1375 .name = "vclk2",
1376 .ops = &clk_regmap_gate_ops,
1377 .parent_names = (const char *[]){ "vclk2_div" },
1378 .num_parents = 1,
1379 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1380 },
1381};
1382
1383static struct clk_regmap g12a_vclk_div1 = {
1384 .data = &(struct clk_regmap_gate_data){
1385 .offset = HHI_VID_CLK_CNTL,
1386 .bit_idx = 0,
1387 },
1388 .hw.init = &(struct clk_init_data) {
1389 .name = "vclk_div1",
1390 .ops = &clk_regmap_gate_ops,
1391 .parent_names = (const char *[]){ "vclk" },
1392 .num_parents = 1,
1393 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1394 },
1395};
1396
1397static struct clk_regmap g12a_vclk_div2_en = {
1398 .data = &(struct clk_regmap_gate_data){
1399 .offset = HHI_VID_CLK_CNTL,
1400 .bit_idx = 1,
1401 },
1402 .hw.init = &(struct clk_init_data) {
1403 .name = "vclk_div2_en",
1404 .ops = &clk_regmap_gate_ops,
1405 .parent_names = (const char *[]){ "vclk" },
1406 .num_parents = 1,
1407 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1408 },
1409};
1410
1411static struct clk_regmap g12a_vclk_div4_en = {
1412 .data = &(struct clk_regmap_gate_data){
1413 .offset = HHI_VID_CLK_CNTL,
1414 .bit_idx = 2,
1415 },
1416 .hw.init = &(struct clk_init_data) {
1417 .name = "vclk_div4_en",
1418 .ops = &clk_regmap_gate_ops,
1419 .parent_names = (const char *[]){ "vclk" },
1420 .num_parents = 1,
1421 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1422 },
1423};
1424
1425static struct clk_regmap g12a_vclk_div6_en = {
1426 .data = &(struct clk_regmap_gate_data){
1427 .offset = HHI_VID_CLK_CNTL,
1428 .bit_idx = 3,
1429 },
1430 .hw.init = &(struct clk_init_data) {
1431 .name = "vclk_div6_en",
1432 .ops = &clk_regmap_gate_ops,
1433 .parent_names = (const char *[]){ "vclk" },
1434 .num_parents = 1,
1435 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1436 },
1437};
1438
1439static struct clk_regmap g12a_vclk_div12_en = {
1440 .data = &(struct clk_regmap_gate_data){
1441 .offset = HHI_VID_CLK_CNTL,
1442 .bit_idx = 4,
1443 },
1444 .hw.init = &(struct clk_init_data) {
1445 .name = "vclk_div12_en",
1446 .ops = &clk_regmap_gate_ops,
1447 .parent_names = (const char *[]){ "vclk" },
1448 .num_parents = 1,
1449 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1450 },
1451};
1452
1453static struct clk_regmap g12a_vclk2_div1 = {
1454 .data = &(struct clk_regmap_gate_data){
1455 .offset = HHI_VIID_CLK_CNTL,
1456 .bit_idx = 0,
1457 },
1458 .hw.init = &(struct clk_init_data) {
1459 .name = "vclk2_div1",
1460 .ops = &clk_regmap_gate_ops,
1461 .parent_names = (const char *[]){ "vclk2" },
1462 .num_parents = 1,
1463 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1464 },
1465};
1466
1467static struct clk_regmap g12a_vclk2_div2_en = {
1468 .data = &(struct clk_regmap_gate_data){
1469 .offset = HHI_VIID_CLK_CNTL,
1470 .bit_idx = 1,
1471 },
1472 .hw.init = &(struct clk_init_data) {
1473 .name = "vclk2_div2_en",
1474 .ops = &clk_regmap_gate_ops,
1475 .parent_names = (const char *[]){ "vclk2" },
1476 .num_parents = 1,
1477 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1478 },
1479};
1480
1481static struct clk_regmap g12a_vclk2_div4_en = {
1482 .data = &(struct clk_regmap_gate_data){
1483 .offset = HHI_VIID_CLK_CNTL,
1484 .bit_idx = 2,
1485 },
1486 .hw.init = &(struct clk_init_data) {
1487 .name = "vclk2_div4_en",
1488 .ops = &clk_regmap_gate_ops,
1489 .parent_names = (const char *[]){ "vclk2" },
1490 .num_parents = 1,
1491 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1492 },
1493};
1494
1495static struct clk_regmap g12a_vclk2_div6_en = {
1496 .data = &(struct clk_regmap_gate_data){
1497 .offset = HHI_VIID_CLK_CNTL,
1498 .bit_idx = 3,
1499 },
1500 .hw.init = &(struct clk_init_data) {
1501 .name = "vclk2_div6_en",
1502 .ops = &clk_regmap_gate_ops,
1503 .parent_names = (const char *[]){ "vclk2" },
1504 .num_parents = 1,
1505 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1506 },
1507};
1508
1509static struct clk_regmap g12a_vclk2_div12_en = {
1510 .data = &(struct clk_regmap_gate_data){
1511 .offset = HHI_VIID_CLK_CNTL,
1512 .bit_idx = 4,
1513 },
1514 .hw.init = &(struct clk_init_data) {
1515 .name = "vclk2_div12_en",
1516 .ops = &clk_regmap_gate_ops,
1517 .parent_names = (const char *[]){ "vclk2" },
1518 .num_parents = 1,
1519 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1520 },
1521};
1522
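/*
 * Each /N tap of vclk and vclk2 has its own enable bit (bits 0-4 of
 * HHI_VID_CLK_CNTL / HHI_VIID_CLK_CNTL), modelled by the gates above,
 * while the actual division is modelled by the fixed-factor clocks below
 * sitting on top of the corresponding *_en gate.
 */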
1523static struct clk_fixed_factor g12a_vclk_div2 = {
1524 .mult = 1,
1525 .div = 2,
1526 .hw.init = &(struct clk_init_data){
1527 .name = "vclk_div2",
1528 .ops = &clk_fixed_factor_ops,
1529 .parent_names = (const char *[]){ "vclk_div2_en" },
1530 .num_parents = 1,
1531 },
1532};
1533
1534static struct clk_fixed_factor g12a_vclk_div4 = {
1535 .mult = 1,
1536 .div = 4,
1537 .hw.init = &(struct clk_init_data){
1538 .name = "vclk_div4",
1539 .ops = &clk_fixed_factor_ops,
1540 .parent_names = (const char *[]){ "vclk_div4_en" },
1541 .num_parents = 1,
1542 },
1543};
1544
1545static struct clk_fixed_factor g12a_vclk_div6 = {
1546 .mult = 1,
1547 .div = 6,
1548 .hw.init = &(struct clk_init_data){
1549 .name = "vclk_div6",
1550 .ops = &clk_fixed_factor_ops,
1551 .parent_names = (const char *[]){ "vclk_div6_en" },
1552 .num_parents = 1,
1553 },
1554};
1555
1556static struct clk_fixed_factor g12a_vclk_div12 = {
1557 .mult = 1,
1558 .div = 12,
1559 .hw.init = &(struct clk_init_data){
1560 .name = "vclk_div12",
1561 .ops = &clk_fixed_factor_ops,
1562 .parent_names = (const char *[]){ "vclk_div12_en" },
1563 .num_parents = 1,
1564 },
1565};
1566
1567static struct clk_fixed_factor g12a_vclk2_div2 = {
1568 .mult = 1,
1569 .div = 2,
1570 .hw.init = &(struct clk_init_data){
1571 .name = "vclk2_div2",
1572 .ops = &clk_fixed_factor_ops,
1573 .parent_names = (const char *[]){ "vclk2_div2_en" },
1574 .num_parents = 1,
1575 },
1576};
1577
1578static struct clk_fixed_factor g12a_vclk2_div4 = {
1579 .mult = 1,
1580 .div = 4,
1581 .hw.init = &(struct clk_init_data){
1582 .name = "vclk2_div4",
1583 .ops = &clk_fixed_factor_ops,
1584 .parent_names = (const char *[]){ "vclk2_div4_en" },
1585 .num_parents = 1,
1586 },
1587};
1588
1589static struct clk_fixed_factor g12a_vclk2_div6 = {
1590 .mult = 1,
1591 .div = 6,
1592 .hw.init = &(struct clk_init_data){
1593 .name = "vclk2_div6",
1594 .ops = &clk_fixed_factor_ops,
1595 .parent_names = (const char *[]){ "vclk2_div6_en" },
1596 .num_parents = 1,
1597 },
1598};
1599
1600static struct clk_fixed_factor g12a_vclk2_div12 = {
1601 .mult = 1,
1602 .div = 12,
1603 .hw.init = &(struct clk_init_data){
1604 .name = "vclk2_div12",
1605 .ops = &clk_fixed_factor_ops,
1606 .parent_names = (const char *[]){ "vclk2_div12_en" },
1607 .num_parents = 1,
1608 },
1609};
1610
1611static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
1612static const char * const g12a_cts_parent_names[] = {
1613 "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
1614 "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
1615 "vclk2_div6", "vclk2_div12"
1616};
1617
1618static struct clk_regmap g12a_cts_enci_sel = {
1619 .data = &(struct clk_regmap_mux_data){
1620 .offset = HHI_VID_CLK_DIV,
1621 .mask = 0xf,
1622 .shift = 28,
1623 .table = mux_table_cts_sel,
1624 },
1625 .hw.init = &(struct clk_init_data){
1626 .name = "cts_enci_sel",
1627 .ops = &clk_regmap_mux_ops,
1628 .parent_names = g12a_cts_parent_names,
1629 .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
1630 .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
1631 },
1632};
1633
1634static struct clk_regmap g12a_cts_encp_sel = {
1635 .data = &(struct clk_regmap_mux_data){
1636 .offset = HHI_VID_CLK_DIV,
1637 .mask = 0xf,
1638 .shift = 20,
1639 .table = mux_table_cts_sel,
1640 },
1641 .hw.init = &(struct clk_init_data){
1642 .name = "cts_encp_sel",
1643 .ops = &clk_regmap_mux_ops,
1644 .parent_names = g12a_cts_parent_names,
1645 .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
1646 .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
1647 },
1648};
1649
1650static struct clk_regmap g12a_cts_vdac_sel = {
1651 .data = &(struct clk_regmap_mux_data){
1652 .offset = HHI_VIID_CLK_DIV,
1653 .mask = 0xf,
1654 .shift = 28,
1655 .table = mux_table_cts_sel,
1656 },
1657 .hw.init = &(struct clk_init_data){
1658 .name = "cts_vdac_sel",
1659 .ops = &clk_regmap_mux_ops,
1660 .parent_names = g12a_cts_parent_names,
1661 .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
1662 .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
1663 },
1664};
1665
1666/* TOFIX: add support for cts_tcon */
1667static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
1668static const char * const g12a_cts_hdmi_tx_parent_names[] = {
1669 "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
1670 "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
1671 "vclk2_div6", "vclk2_div12"
1672};
1673
1674static struct clk_regmap g12a_hdmi_tx_sel = {
1675 .data = &(struct clk_regmap_mux_data){
1676 .offset = HHI_HDMI_CLK_CNTL,
1677 .mask = 0xf,
1678 .shift = 16,
1679 .table = mux_table_hdmi_tx_sel,
1680 },
1681 .hw.init = &(struct clk_init_data){
1682 .name = "hdmi_tx_sel",
1683 .ops = &clk_regmap_mux_ops,
1684 .parent_names = g12a_cts_hdmi_tx_parent_names,
1685 .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_names),
1686 .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
1687 },
1688};
1689
1690static struct clk_regmap g12a_cts_enci = {
1691 .data = &(struct clk_regmap_gate_data){
1692 .offset = HHI_VID_CLK_CNTL2,
1693 .bit_idx = 0,
1694 },
1695 .hw.init = &(struct clk_init_data) {
1696 .name = "cts_enci",
1697 .ops = &clk_regmap_gate_ops,
1698 .parent_names = (const char *[]){ "cts_enci_sel" },
1699 .num_parents = 1,
1700 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1701 },
1702};
1703
1704static struct clk_regmap g12a_cts_encp = {
1705 .data = &(struct clk_regmap_gate_data){
1706 .offset = HHI_VID_CLK_CNTL2,
1707 .bit_idx = 2,
1708 },
1709 .hw.init = &(struct clk_init_data) {
1710 .name = "cts_encp",
1711 .ops = &clk_regmap_gate_ops,
1712 .parent_names = (const char *[]){ "cts_encp_sel" },
1713 .num_parents = 1,
1714 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1715 },
1716};
1717
1718static struct clk_regmap g12a_cts_vdac = {
1719 .data = &(struct clk_regmap_gate_data){
1720 .offset = HHI_VID_CLK_CNTL2,
1721 .bit_idx = 4,
1722 },
1723 .hw.init = &(struct clk_init_data) {
1724 .name = "cts_vdac",
1725 .ops = &clk_regmap_gate_ops,
1726 .parent_names = (const char *[]){ "cts_vdac_sel" },
1727 .num_parents = 1,
1728 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1729 },
1730};
1731
1732static struct clk_regmap g12a_hdmi_tx = {
1733 .data = &(struct clk_regmap_gate_data){
1734 .offset = HHI_VID_CLK_CNTL2,
1735 .bit_idx = 5,
1736 },
1737 .hw.init = &(struct clk_init_data) {
1738 .name = "hdmi_tx",
1739 .ops = &clk_regmap_gate_ops,
1740 .parent_names = (const char *[]){ "hdmi_tx_sel" },
1741 .num_parents = 1,
1742 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1743 },
1744};
1745
1746/* HDMI Clocks */
1747
1748static const char * const g12a_hdmi_parent_names[] = {
1749 IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
1750};
1751
1752static struct clk_regmap g12a_hdmi_sel = {
1753 .data = &(struct clk_regmap_mux_data){
1754 .offset = HHI_HDMI_CLK_CNTL,
1755 .mask = 0x3,
1756 .shift = 9,
1757 .flags = CLK_MUX_ROUND_CLOSEST,
1758 },
1759 .hw.init = &(struct clk_init_data){
1760 .name = "hdmi_sel",
1761 .ops = &clk_regmap_mux_ops,
1762 .parent_names = g12a_hdmi_parent_names,
1763 .num_parents = ARRAY_SIZE(g12a_hdmi_parent_names),
1764 .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
1765 },
1766};
1767
1768static struct clk_regmap g12a_hdmi_div = {
1769 .data = &(struct clk_regmap_div_data){
1770 .offset = HHI_HDMI_CLK_CNTL,
1771 .shift = 0,
1772 .width = 7,
1773 },
1774 .hw.init = &(struct clk_init_data){
1775 .name = "hdmi_div",
1776 .ops = &clk_regmap_divider_ops,
1777 .parent_names = (const char *[]){ "hdmi_sel" },
1778 .num_parents = 1,
1779 .flags = CLK_GET_RATE_NOCACHE,
1780 },
1781};
1782
1783static struct clk_regmap g12a_hdmi = {
1784 .data = &(struct clk_regmap_gate_data){
1785 .offset = HHI_HDMI_CLK_CNTL,
1786 .bit_idx = 8,
1787 },
1788 .hw.init = &(struct clk_init_data) {
1789 .name = "hdmi",
1790 .ops = &clk_regmap_gate_ops,
1791 .parent_names = (const char *[]){ "hdmi_div" },
1792 .num_parents = 1,
1793 .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
1794 },
1795};
1796
1797/*
1798 * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
1799 * muxed by a glitch-free switch.
1800 */
1801
1802static const char * const g12a_mali_0_1_parent_names[] = {
1803 IN_PREFIX "xtal", "gp0_pll", "hifi_pll", "fclk_div2p5",
1804 "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7"
1805};
1806
1807static struct clk_regmap g12a_mali_0_sel = {
1808 .data = &(struct clk_regmap_mux_data){
1809 .offset = HHI_MALI_CLK_CNTL,
1810 .mask = 0x7,
1811 .shift = 9,
1812 },
1813 .hw.init = &(struct clk_init_data){
1814 .name = "mali_0_sel",
1815 .ops = &clk_regmap_mux_ops,
1816 .parent_names = g12a_mali_0_1_parent_names,
1817 .num_parents = 8,
1818 .flags = CLK_SET_RATE_NO_REPARENT,
1819 },
1820};
1821
1822static struct clk_regmap g12a_mali_0_div = {
1823 .data = &(struct clk_regmap_div_data){
1824 .offset = HHI_MALI_CLK_CNTL,
1825 .shift = 0,
1826 .width = 7,
1827 },
1828 .hw.init = &(struct clk_init_data){
1829 .name = "mali_0_div",
1830 .ops = &clk_regmap_divider_ops,
1831 .parent_names = (const char *[]){ "mali_0_sel" },
1832 .num_parents = 1,
1833 .flags = CLK_SET_RATE_NO_REPARENT,
1834 },
1835};
1836
1837static struct clk_regmap g12a_mali_0 = {
1838 .data = &(struct clk_regmap_gate_data){
1839 .offset = HHI_MALI_CLK_CNTL,
1840 .bit_idx = 8,
1841 },
1842 .hw.init = &(struct clk_init_data){
1843 .name = "mali_0",
1844 .ops = &clk_regmap_gate_ops,
1845 .parent_names = (const char *[]){ "mali_0_div" },
1846 .num_parents = 1,
1847 .flags = CLK_SET_RATE_PARENT,
1848 },
1849};
1850
1851static struct clk_regmap g12a_mali_1_sel = {
1852 .data = &(struct clk_regmap_mux_data){
1853 .offset = HHI_MALI_CLK_CNTL,
1854 .mask = 0x7,
1855 .shift = 25,
1856 },
1857 .hw.init = &(struct clk_init_data){
1858 .name = "mali_1_sel",
1859 .ops = &clk_regmap_mux_ops,
1860 .parent_names = g12a_mali_0_1_parent_names,
1861 .num_parents = 8,
1862 .flags = CLK_SET_RATE_NO_REPARENT,
1863 },
1864};
1865
1866static struct clk_regmap g12a_mali_1_div = {
1867 .data = &(struct clk_regmap_div_data){
1868 .offset = HHI_MALI_CLK_CNTL,
1869 .shift = 16,
1870 .width = 7,
1871 },
1872 .hw.init = &(struct clk_init_data){
1873 .name = "mali_1_div",
1874 .ops = &clk_regmap_divider_ops,
1875 .parent_names = (const char *[]){ "mali_1_sel" },
1876 .num_parents = 1,
1877 .flags = CLK_SET_RATE_NO_REPARENT,
1878 },
1879};
1880
1881static struct clk_regmap g12a_mali_1 = {
1882 .data = &(struct clk_regmap_gate_data){
1883 .offset = HHI_MALI_CLK_CNTL,
1884 .bit_idx = 24,
1885 },
1886 .hw.init = &(struct clk_init_data){
1887 .name = "mali_1",
1888 .ops = &clk_regmap_gate_ops,
1889 .parent_names = (const char *[]){ "mali_1_div" },
1890 .num_parents = 1,
1891 .flags = CLK_SET_RATE_PARENT,
1892 },
1893};
1894
1895static const char * const g12a_mali_parent_names[] = {
1896 "mali_0", "mali_1"
1897};
1898
1899static struct clk_regmap g12a_mali = {
1900 .data = &(struct clk_regmap_mux_data){
1901 .offset = HHI_MALI_CLK_CNTL,
1902 .mask = 1,
1903 .shift = 31,
1904 },
1905 .hw.init = &(struct clk_init_data){
1906 .name = "mali",
1907 .ops = &clk_regmap_mux_ops,
1908 .parent_names = g12a_mali_parent_names,
1909 .num_parents = 2,
1910 .flags = CLK_SET_RATE_NO_REPARENT,
1911 },
1912};
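
The glitch-free mux above is what allows the GPU to be reclocked while it is running: software programs whichever leg (mali_0 or mali_1) is currently idle, then flips the top-level "mali" mux over to it. A minimal consumer-side sketch, assuming the caller already holds references to the "mali" mux clock and to the idle leg (the helper name and error handling are illustrative, not part of this patch):

#include <linux/clk.h>

/* Illustrative only: program the idle leg, then switch the glitch-free mux */
static int mali_reclock_glitch_free(struct clk *mali, struct clk *idle_leg,
				    unsigned long rate)
{
	int ret;

	/* The idle leg is not driving the GPU, so it is safe to retune */
	ret = clk_set_rate(idle_leg, rate);
	if (ret)
		return ret;

	/* The hardware mux switches over without producing a glitch */
	return clk_set_parent(mali, idle_leg);
}
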
1913
1914/* Everything Else (EE) domain gates */
1915static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);
1916static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1);
1917static MESON_GATE(g12a_audio_locker, HHI_GCLK_MPEG0, 2);
1918static MESON_GATE(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
1919static MESON_GATE(g12a_eth_phy, HHI_GCLK_MPEG0, 4);
1920static MESON_GATE(g12a_isa, HHI_GCLK_MPEG0, 5);
1921static MESON_GATE(g12a_pl301, HHI_GCLK_MPEG0, 6);
1922static MESON_GATE(g12a_periphs, HHI_GCLK_MPEG0, 7);
1923static MESON_GATE(g12a_spicc_0, HHI_GCLK_MPEG0, 8);
1924static MESON_GATE(g12a_i2c, HHI_GCLK_MPEG0, 9);
1925static MESON_GATE(g12a_sana, HHI_GCLK_MPEG0, 10);
1926static MESON_GATE(g12a_sd, HHI_GCLK_MPEG0, 11);
1927static MESON_GATE(g12a_rng0, HHI_GCLK_MPEG0, 12);
1928static MESON_GATE(g12a_uart0, HHI_GCLK_MPEG0, 13);
1929static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
1930static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
1931static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
1932static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
1933static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
1934static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
1935static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
1936static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
1937
1938static MESON_GATE(g12a_audio, HHI_GCLK_MPEG1, 0);
1939static MESON_GATE(g12a_eth_core, HHI_GCLK_MPEG1, 3);
1940static MESON_GATE(g12a_demux, HHI_GCLK_MPEG1, 4);
1941static MESON_GATE(g12a_audio_ififo, HHI_GCLK_MPEG1, 11);
1942static MESON_GATE(g12a_adc, HHI_GCLK_MPEG1, 13);
1943static MESON_GATE(g12a_uart1, HHI_GCLK_MPEG1, 16);
1944static MESON_GATE(g12a_g2d, HHI_GCLK_MPEG1, 20);
1945static MESON_GATE(g12a_reset, HHI_GCLK_MPEG1, 23);
1946static MESON_GATE(g12a_pcie_comb, HHI_GCLK_MPEG1, 24);
1947static MESON_GATE(g12a_parser, HHI_GCLK_MPEG1, 25);
1948static MESON_GATE(g12a_usb_general, HHI_GCLK_MPEG1, 26);
1949static MESON_GATE(g12a_pcie_phy, HHI_GCLK_MPEG1, 27);
1950static MESON_GATE(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29);
1951
1952static MESON_GATE(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1);
1953static MESON_GATE(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
1954static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3);
1955static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4);
1956static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6);
1957static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
1958static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11);
1959static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15);
1960static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25);
1961static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30);
1962
1963static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1);
1964static MESON_GATE(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2);
1965static MESON_GATE(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3);
1966static MESON_GATE(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4);
1967static MESON_GATE(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5);
1968static MESON_GATE(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6);
1969static MESON_GATE(g12a_vclk2_other, HHI_GCLK_OTHER, 7);
1970static MESON_GATE(g12a_vclk2_enci, HHI_GCLK_OTHER, 8);
1971static MESON_GATE(g12a_vclk2_encp, HHI_GCLK_OTHER, 9);
1972static MESON_GATE(g12a_dac_clk, HHI_GCLK_OTHER, 10);
1973static MESON_GATE(g12a_aoclk_gate, HHI_GCLK_OTHER, 14);
1974static MESON_GATE(g12a_iec958_gate, HHI_GCLK_OTHER, 16);
1975static MESON_GATE(g12a_enc480p, HHI_GCLK_OTHER, 20);
1976static MESON_GATE(g12a_rng1, HHI_GCLK_OTHER, 21);
1977static MESON_GATE(g12a_vclk2_enct, HHI_GCLK_OTHER, 22);
1978static MESON_GATE(g12a_vclk2_encl, HHI_GCLK_OTHER, 23);
1979static MESON_GATE(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24);
1980static MESON_GATE(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25);
1981static MESON_GATE(g12a_vclk2_other1, HHI_GCLK_OTHER, 26);
1982
1983static MESON_GATE_RO(g12a_dma, HHI_GCLK_OTHER2, 0);
1984static MESON_GATE_RO(g12a_efuse, HHI_GCLK_OTHER2, 1);
1985static MESON_GATE_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2);
1986static MESON_GATE_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3);
1987static MESON_GATE_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4);
1988
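Each MESON_GATE()/MESON_GATE_RO() line above declares a complete clk_regmap gate; the macro itself lives in the driver's shared headers and is not part of this hunk. As a rough sketch of what a single line such as MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0) expands to (the "static" comes from the call site, and the exact flags and the read-only variant's ops may differ, so treat this as an approximation rather than the real macro):

struct clk_regmap g12a_ddr = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_GCLK_MPEG0,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data){
		.name = "g12a_ddr",
		.ops = &clk_regmap_gate_ops,
		/* peripheral gates hang off the "clk81" MPEG clock */
		.parent_names = (const char *[]){ "clk81" },
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
	},
};
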
1989/* Array of all clocks provided by this provider */
1990static struct clk_hw_onecell_data g12a_hw_onecell_data = {
1991 .hws = {
1992 [CLKID_SYS_PLL] = &g12a_sys_pll.hw,
1993 [CLKID_FIXED_PLL] = &g12a_fixed_pll.hw,
1994 [CLKID_FCLK_DIV2] = &g12a_fclk_div2.hw,
1995 [CLKID_FCLK_DIV3] = &g12a_fclk_div3.hw,
1996 [CLKID_FCLK_DIV4] = &g12a_fclk_div4.hw,
1997 [CLKID_FCLK_DIV5] = &g12a_fclk_div5.hw,
1998 [CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
1999 [CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
2000 [CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
2001 [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
2002 [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
2003 [CLKID_CLK81] = &g12a_clk81.hw,
2004 [CLKID_MPLL0] = &g12a_mpll0.hw,
2005 [CLKID_MPLL1] = &g12a_mpll1.hw,
2006 [CLKID_MPLL2] = &g12a_mpll2.hw,
2007 [CLKID_MPLL3] = &g12a_mpll3.hw,
2008 [CLKID_DDR] = &g12a_ddr.hw,
2009 [CLKID_DOS] = &g12a_dos.hw,
2010 [CLKID_AUDIO_LOCKER] = &g12a_audio_locker.hw,
2011 [CLKID_MIPI_DSI_HOST] = &g12a_mipi_dsi_host.hw,
2012 [CLKID_ETH_PHY] = &g12a_eth_phy.hw,
2013 [CLKID_ISA] = &g12a_isa.hw,
2014 [CLKID_PL301] = &g12a_pl301.hw,
2015 [CLKID_PERIPHS] = &g12a_periphs.hw,
2016 [CLKID_SPICC0] = &g12a_spicc_0.hw,
2017 [CLKID_I2C] = &g12a_i2c.hw,
2018 [CLKID_SANA] = &g12a_sana.hw,
2019 [CLKID_SD] = &g12a_sd.hw,
2020 [CLKID_RNG0] = &g12a_rng0.hw,
2021 [CLKID_UART0] = &g12a_uart0.hw,
2022 [CLKID_SPICC1] = &g12a_spicc_1.hw,
2023 [CLKID_HIU_IFACE] = &g12a_hiu_reg.hw,
2024 [CLKID_MIPI_DSI_PHY] = &g12a_mipi_dsi_phy.hw,
2025 [CLKID_ASSIST_MISC] = &g12a_assist_misc.hw,
2026 [CLKID_SD_EMMC_A] = &g12a_emmc_a.hw,
2027 [CLKID_SD_EMMC_B] = &g12a_emmc_b.hw,
2028 [CLKID_SD_EMMC_C] = &g12a_emmc_c.hw,
2029 [CLKID_AUDIO_CODEC] = &g12a_audio_codec.hw,
2030 [CLKID_AUDIO] = &g12a_audio.hw,
2031 [CLKID_ETH] = &g12a_eth_core.hw,
2032 [CLKID_DEMUX] = &g12a_demux.hw,
2033 [CLKID_AUDIO_IFIFO] = &g12a_audio_ififo.hw,
2034 [CLKID_ADC] = &g12a_adc.hw,
2035 [CLKID_UART1] = &g12a_uart1.hw,
2036 [CLKID_G2D] = &g12a_g2d.hw,
2037 [CLKID_RESET] = &g12a_reset.hw,
2038 [CLKID_PCIE_COMB] = &g12a_pcie_comb.hw,
2039 [CLKID_PARSER] = &g12a_parser.hw,
2040 [CLKID_USB] = &g12a_usb_general.hw,
2041 [CLKID_PCIE_PHY] = &g12a_pcie_phy.hw,
2042 [CLKID_AHB_ARB0] = &g12a_ahb_arb0.hw,
2043 [CLKID_AHB_DATA_BUS] = &g12a_ahb_data_bus.hw,
2044 [CLKID_AHB_CTRL_BUS] = &g12a_ahb_ctrl_bus.hw,
2045 [CLKID_HTX_HDCP22] = &g12a_htx_hdcp22.hw,
2046 [CLKID_HTX_PCLK] = &g12a_htx_pclk.hw,
2047 [CLKID_BT656] = &g12a_bt656.hw,
2048 [CLKID_USB1_DDR_BRIDGE] = &g12a_usb1_to_ddr.hw,
2049 [CLKID_MMC_PCLK] = &g12a_mmc_pclk.hw,
2050 [CLKID_UART2] = &g12a_uart2.hw,
2051 [CLKID_VPU_INTR] = &g12a_vpu_intr.hw,
2052 [CLKID_GIC] = &g12a_gic.hw,
2053 [CLKID_SD_EMMC_A_CLK0_SEL] = &g12a_sd_emmc_a_clk0_sel.hw,
2054 [CLKID_SD_EMMC_A_CLK0_DIV] = &g12a_sd_emmc_a_clk0_div.hw,
2055 [CLKID_SD_EMMC_A_CLK0] = &g12a_sd_emmc_a_clk0.hw,
2056 [CLKID_SD_EMMC_B_CLK0_SEL] = &g12a_sd_emmc_b_clk0_sel.hw,
2057 [CLKID_SD_EMMC_B_CLK0_DIV] = &g12a_sd_emmc_b_clk0_div.hw,
2058 [CLKID_SD_EMMC_B_CLK0] = &g12a_sd_emmc_b_clk0.hw,
2059 [CLKID_SD_EMMC_C_CLK0_SEL] = &g12a_sd_emmc_c_clk0_sel.hw,
2060 [CLKID_SD_EMMC_C_CLK0_DIV] = &g12a_sd_emmc_c_clk0_div.hw,
2061 [CLKID_SD_EMMC_C_CLK0] = &g12a_sd_emmc_c_clk0.hw,
2062 [CLKID_MPLL0_DIV] = &g12a_mpll0_div.hw,
2063 [CLKID_MPLL1_DIV] = &g12a_mpll1_div.hw,
2064 [CLKID_MPLL2_DIV] = &g12a_mpll2_div.hw,
2065 [CLKID_MPLL3_DIV] = &g12a_mpll3_div.hw,
2066 [CLKID_FCLK_DIV2_DIV] = &g12a_fclk_div2_div.hw,
2067 [CLKID_FCLK_DIV3_DIV] = &g12a_fclk_div3_div.hw,
2068 [CLKID_FCLK_DIV4_DIV] = &g12a_fclk_div4_div.hw,
2069 [CLKID_FCLK_DIV5_DIV] = &g12a_fclk_div5_div.hw,
2070 [CLKID_FCLK_DIV7_DIV] = &g12a_fclk_div7_div.hw,
2071 [CLKID_FCLK_DIV2P5_DIV] = &g12a_fclk_div2p5_div.hw,
2072 [CLKID_HIFI_PLL] = &g12a_hifi_pll.hw,
2073 [CLKID_VCLK2_VENCI0] = &g12a_vclk2_venci0.hw,
2074 [CLKID_VCLK2_VENCI1] = &g12a_vclk2_venci1.hw,
2075 [CLKID_VCLK2_VENCP0] = &g12a_vclk2_vencp0.hw,
2076 [CLKID_VCLK2_VENCP1] = &g12a_vclk2_vencp1.hw,
2077 [CLKID_VCLK2_VENCT0] = &g12a_vclk2_venct0.hw,
2078 [CLKID_VCLK2_VENCT1] = &g12a_vclk2_venct1.hw,
2079 [CLKID_VCLK2_OTHER] = &g12a_vclk2_other.hw,
2080 [CLKID_VCLK2_ENCI] = &g12a_vclk2_enci.hw,
2081 [CLKID_VCLK2_ENCP] = &g12a_vclk2_encp.hw,
2082 [CLKID_DAC_CLK] = &g12a_dac_clk.hw,
2083 [CLKID_AOCLK] = &g12a_aoclk_gate.hw,
2084 [CLKID_IEC958] = &g12a_iec958_gate.hw,
2085 [CLKID_ENC480P] = &g12a_enc480p.hw,
2086 [CLKID_RNG1] = &g12a_rng1.hw,
2087 [CLKID_VCLK2_ENCT] = &g12a_vclk2_enct.hw,
2088 [CLKID_VCLK2_ENCL] = &g12a_vclk2_encl.hw,
2089 [CLKID_VCLK2_VENCLMMC] = &g12a_vclk2_venclmmc.hw,
2090 [CLKID_VCLK2_VENCL] = &g12a_vclk2_vencl.hw,
2091 [CLKID_VCLK2_OTHER1] = &g12a_vclk2_other1.hw,
2092 [CLKID_FIXED_PLL_DCO] = &g12a_fixed_pll_dco.hw,
2093 [CLKID_SYS_PLL_DCO] = &g12a_sys_pll_dco.hw,
2094 [CLKID_GP0_PLL_DCO] = &g12a_gp0_pll_dco.hw,
2095 [CLKID_HIFI_PLL_DCO] = &g12a_hifi_pll_dco.hw,
2096 [CLKID_DMA] = &g12a_dma.hw,
2097 [CLKID_EFUSE] = &g12a_efuse.hw,
2098 [CLKID_ROM_BOOT] = &g12a_rom_boot.hw,
2099 [CLKID_RESET_SEC] = &g12a_reset_sec.hw,
2100 [CLKID_SEC_AHB_APB3] = &g12a_sec_ahb_apb3.hw,
2101 [CLKID_MPLL_PREDIV] = &g12a_mpll_prediv.hw,
2102 [CLKID_VPU_0_SEL] = &g12a_vpu_0_sel.hw,
2103 [CLKID_VPU_0_DIV] = &g12a_vpu_0_div.hw,
2104 [CLKID_VPU_0] = &g12a_vpu_0.hw,
2105 [CLKID_VPU_1_SEL] = &g12a_vpu_1_sel.hw,
2106 [CLKID_VPU_1_DIV] = &g12a_vpu_1_div.hw,
2107 [CLKID_VPU_1] = &g12a_vpu_1.hw,
2108 [CLKID_VPU] = &g12a_vpu.hw,
2109 [CLKID_VAPB_0_SEL] = &g12a_vapb_0_sel.hw,
2110 [CLKID_VAPB_0_DIV] = &g12a_vapb_0_div.hw,
2111 [CLKID_VAPB_0] = &g12a_vapb_0.hw,
2112 [CLKID_VAPB_1_SEL] = &g12a_vapb_1_sel.hw,
2113 [CLKID_VAPB_1_DIV] = &g12a_vapb_1_div.hw,
2114 [CLKID_VAPB_1] = &g12a_vapb_1.hw,
2115 [CLKID_VAPB_SEL] = &g12a_vapb_sel.hw,
2116 [CLKID_VAPB] = &g12a_vapb.hw,
2117 [CLKID_HDMI_PLL_DCO] = &g12a_hdmi_pll_dco.hw,
2118 [CLKID_HDMI_PLL_OD] = &g12a_hdmi_pll_od.hw,
2119 [CLKID_HDMI_PLL_OD2] = &g12a_hdmi_pll_od2.hw,
2120 [CLKID_HDMI_PLL] = &g12a_hdmi_pll.hw,
2121 [CLKID_VID_PLL] = &g12a_vid_pll_div.hw,
2122 [CLKID_VID_PLL_SEL] = &g12a_vid_pll_sel.hw,
2123 [CLKID_VID_PLL_DIV] = &g12a_vid_pll.hw,
2124 [CLKID_VCLK_SEL] = &g12a_vclk_sel.hw,
2125 [CLKID_VCLK2_SEL] = &g12a_vclk2_sel.hw,
2126 [CLKID_VCLK_INPUT] = &g12a_vclk_input.hw,
2127 [CLKID_VCLK2_INPUT] = &g12a_vclk2_input.hw,
2128 [CLKID_VCLK_DIV] = &g12a_vclk_div.hw,
2129 [CLKID_VCLK2_DIV] = &g12a_vclk2_div.hw,
2130 [CLKID_VCLK] = &g12a_vclk.hw,
2131 [CLKID_VCLK2] = &g12a_vclk2.hw,
2132 [CLKID_VCLK_DIV1] = &g12a_vclk_div1.hw,
2133 [CLKID_VCLK_DIV2_EN] = &g12a_vclk_div2_en.hw,
2134 [CLKID_VCLK_DIV4_EN] = &g12a_vclk_div4_en.hw,
2135 [CLKID_VCLK_DIV6_EN] = &g12a_vclk_div6_en.hw,
2136 [CLKID_VCLK_DIV12_EN] = &g12a_vclk_div12_en.hw,
2137 [CLKID_VCLK2_DIV1] = &g12a_vclk2_div1.hw,
2138 [CLKID_VCLK2_DIV2_EN] = &g12a_vclk2_div2_en.hw,
2139 [CLKID_VCLK2_DIV4_EN] = &g12a_vclk2_div4_en.hw,
2140 [CLKID_VCLK2_DIV6_EN] = &g12a_vclk2_div6_en.hw,
2141 [CLKID_VCLK2_DIV12_EN] = &g12a_vclk2_div12_en.hw,
2142 [CLKID_VCLK_DIV2] = &g12a_vclk_div2.hw,
2143 [CLKID_VCLK_DIV4] = &g12a_vclk_div4.hw,
2144 [CLKID_VCLK_DIV6] = &g12a_vclk_div6.hw,
2145 [CLKID_VCLK_DIV12] = &g12a_vclk_div12.hw,
2146 [CLKID_VCLK2_DIV2] = &g12a_vclk2_div2.hw,
2147 [CLKID_VCLK2_DIV4] = &g12a_vclk2_div4.hw,
2148 [CLKID_VCLK2_DIV6] = &g12a_vclk2_div6.hw,
2149 [CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw,
2150 [CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw,
2151 [CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw,
2152 [CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw,
2153 [CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw,
2154 [CLKID_CTS_ENCI] = &g12a_cts_enci.hw,
2155 [CLKID_CTS_ENCP] = &g12a_cts_encp.hw,
2156 [CLKID_CTS_VDAC] = &g12a_cts_vdac.hw,
2157 [CLKID_HDMI_TX] = &g12a_hdmi_tx.hw,
2158 [CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw,
2159 [CLKID_HDMI_DIV] = &g12a_hdmi_div.hw,
2160 [CLKID_HDMI] = &g12a_hdmi.hw,
2161 [CLKID_MALI_0_SEL] = &g12a_mali_0_sel.hw,
2162 [CLKID_MALI_0_DIV] = &g12a_mali_0_div.hw,
2163 [CLKID_MALI_0] = &g12a_mali_0.hw,
2164 [CLKID_MALI_1_SEL] = &g12a_mali_1_sel.hw,
2165 [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw,
2166 [CLKID_MALI_1] = &g12a_mali_1.hw,
2167 [CLKID_MALI] = &g12a_mali.hw,
2168 [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
2169 [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
2170 [NR_CLKS] = NULL,
2171 },
2172 .num = NR_CLKS,
2173};
2174
2175/* Convenience table to populate regmap in .probe */
2176static struct clk_regmap *const g12a_clk_regmaps[] = {
2177 &g12a_clk81,
2178 &g12a_dos,
2179 &g12a_ddr,
2180 &g12a_audio_locker,
2181 &g12a_mipi_dsi_host,
2182 &g12a_eth_phy,
2183 &g12a_isa,
2184 &g12a_pl301,
2185 &g12a_periphs,
2186 &g12a_spicc_0,
2187 &g12a_i2c,
2188 &g12a_sana,
2189 &g12a_sd,
2190 &g12a_rng0,
2191 &g12a_uart0,
2192 &g12a_spicc_1,
2193 &g12a_hiu_reg,
2194 &g12a_mipi_dsi_phy,
2195 &g12a_assist_misc,
2196 &g12a_emmc_a,
2197 &g12a_emmc_b,
2198 &g12a_emmc_c,
2199 &g12a_audio_codec,
2200 &g12a_audio,
2201 &g12a_eth_core,
2202 &g12a_demux,
2203 &g12a_audio_ififo,
2204 &g12a_adc,
2205 &g12a_uart1,
2206 &g12a_g2d,
2207 &g12a_reset,
2208 &g12a_pcie_comb,
2209 &g12a_parser,
2210 &g12a_usb_general,
2211 &g12a_pcie_phy,
2212 &g12a_ahb_arb0,
2213 &g12a_ahb_data_bus,
2214 &g12a_ahb_ctrl_bus,
2215 &g12a_htx_hdcp22,
2216 &g12a_htx_pclk,
2217 &g12a_bt656,
2218 &g12a_usb1_to_ddr,
2219 &g12a_mmc_pclk,
2220 &g12a_vpu_intr,
2221 &g12a_gic,
2222 &g12a_sd_emmc_a_clk0,
2223 &g12a_sd_emmc_b_clk0,
2224 &g12a_sd_emmc_c_clk0,
2225 &g12a_mpeg_clk_div,
2226 &g12a_sd_emmc_a_clk0_div,
2227 &g12a_sd_emmc_b_clk0_div,
2228 &g12a_sd_emmc_c_clk0_div,
2229 &g12a_mpeg_clk_sel,
2230 &g12a_sd_emmc_a_clk0_sel,
2231 &g12a_sd_emmc_b_clk0_sel,
2232 &g12a_sd_emmc_c_clk0_sel,
2233 &g12a_mpll0,
2234 &g12a_mpll1,
2235 &g12a_mpll2,
2236 &g12a_mpll3,
2237 &g12a_mpll0_div,
2238 &g12a_mpll1_div,
2239 &g12a_mpll2_div,
2240 &g12a_mpll3_div,
2241 &g12a_fixed_pll,
2242 &g12a_sys_pll,
2243 &g12a_gp0_pll,
2244 &g12a_hifi_pll,
2245 &g12a_vclk2_venci0,
2246 &g12a_vclk2_venci1,
2247 &g12a_vclk2_vencp0,
2248 &g12a_vclk2_vencp1,
2249 &g12a_vclk2_venct0,
2250 &g12a_vclk2_venct1,
2251 &g12a_vclk2_other,
2252 &g12a_vclk2_enci,
2253 &g12a_vclk2_encp,
2254 &g12a_dac_clk,
2255 &g12a_aoclk_gate,
2256 &g12a_iec958_gate,
2257 &g12a_enc480p,
2258 &g12a_rng1,
2259 &g12a_vclk2_enct,
2260 &g12a_vclk2_encl,
2261 &g12a_vclk2_venclmmc,
2262 &g12a_vclk2_vencl,
2263 &g12a_vclk2_other1,
2264 &g12a_fixed_pll_dco,
2265 &g12a_sys_pll_dco,
2266 &g12a_gp0_pll_dco,
2267 &g12a_hifi_pll_dco,
2268 &g12a_fclk_div2,
2269 &g12a_fclk_div3,
2270 &g12a_fclk_div4,
2271 &g12a_fclk_div5,
2272 &g12a_fclk_div7,
2273 &g12a_fclk_div2p5,
2274 &g12a_dma,
2275 &g12a_efuse,
2276 &g12a_rom_boot,
2277 &g12a_reset_sec,
2278 &g12a_sec_ahb_apb3,
2279 &g12a_vpu_0_sel,
2280 &g12a_vpu_0_div,
2281 &g12a_vpu_0,
2282 &g12a_vpu_1_sel,
2283 &g12a_vpu_1_div,
2284 &g12a_vpu_1,
2285 &g12a_vpu,
2286 &g12a_vapb_0_sel,
2287 &g12a_vapb_0_div,
2288 &g12a_vapb_0,
2289 &g12a_vapb_1_sel,
2290 &g12a_vapb_1_div,
2291 &g12a_vapb_1,
2292 &g12a_vapb_sel,
2293 &g12a_vapb,
2294 &g12a_hdmi_pll_dco,
2295 &g12a_hdmi_pll_od,
2296 &g12a_hdmi_pll_od2,
2297 &g12a_hdmi_pll,
2298 &g12a_vid_pll_div,
2299 &g12a_vid_pll_sel,
2300 &g12a_vid_pll,
2301 &g12a_vclk_sel,
2302 &g12a_vclk2_sel,
2303 &g12a_vclk_input,
2304 &g12a_vclk2_input,
2305 &g12a_vclk_div,
2306 &g12a_vclk2_div,
2307 &g12a_vclk,
2308 &g12a_vclk2,
2309 &g12a_vclk_div1,
2310 &g12a_vclk_div2_en,
2311 &g12a_vclk_div4_en,
2312 &g12a_vclk_div6_en,
2313 &g12a_vclk_div12_en,
2314 &g12a_vclk2_div1,
2315 &g12a_vclk2_div2_en,
2316 &g12a_vclk2_div4_en,
2317 &g12a_vclk2_div6_en,
2318 &g12a_vclk2_div12_en,
2319 &g12a_cts_enci_sel,
2320 &g12a_cts_encp_sel,
2321 &g12a_cts_vdac_sel,
2322 &g12a_hdmi_tx_sel,
2323 &g12a_cts_enci,
2324 &g12a_cts_encp,
2325 &g12a_cts_vdac,
2326 &g12a_hdmi_tx,
2327 &g12a_hdmi_sel,
2328 &g12a_hdmi_div,
2329 &g12a_hdmi,
2330 &g12a_mali_0_sel,
2331 &g12a_mali_0_div,
2332 &g12a_mali_0,
2333 &g12a_mali_1_sel,
2334 &g12a_mali_1_div,
2335 &g12a_mali_1,
2336 &g12a_mali,
2337 &g12a_mpll_50m,
2338};
2339
2340static const struct meson_eeclkc_data g12a_clkc_data = {
2341 .regmap_clks = g12a_clk_regmaps,
2342 .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
2343 .hw_onecell_data = &g12a_hw_onecell_data
2344};
2345
2346static const struct of_device_id clkc_match_table[] = {
2347 { .compatible = "amlogic,g12a-clkc", .data = &g12a_clkc_data },
2348 {}
2349};
2350
2351static struct platform_driver g12a_driver = {
2352 .probe = meson_eeclkc_probe,
2353 .driver = {
2354 .name = "g12a-clkc",
2355 .of_match_table = clkc_match_table,
2356 },
2357};
2358
2359builtin_platform_driver(g12a_driver);
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
new file mode 100644
index 000000000000..f399dfe1401c
--- /dev/null
+++ b/drivers/clk/meson/g12a.h
@@ -0,0 +1,175 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
2/*
3 * Copyright (c) 2016 Amlogic, Inc.
4 * Author: Michael Turquette <mturquette@baylibre.com>
5 *
6 * Copyright (c) 2018 Amlogic, inc.
7 * Author: Qiufang Dai <qiufang.dai@amlogic.com>
8 * Author: Jian Hu <jian.hu@amlogic.com>
9 *
10 */
11#ifndef __G12A_H
12#define __G12A_H
13
14/*
15 * Clock controller register offsets
16 *
17 * Register offsets from the data sheet must be multiplied by 4 before
18 * adding them to the base address to get the right value.
19 */
20#define HHI_MIPI_CNTL0 0x000
21#define HHI_MIPI_CNTL1 0x004
22#define HHI_MIPI_CNTL2 0x008
23#define HHI_MIPI_STS 0x00C
24#define HHI_GP0_PLL_CNTL0 0x040
25#define HHI_GP0_PLL_CNTL1 0x044
26#define HHI_GP0_PLL_CNTL2 0x048
27#define HHI_GP0_PLL_CNTL3 0x04C
28#define HHI_GP0_PLL_CNTL4 0x050
29#define HHI_GP0_PLL_CNTL5 0x054
30#define HHI_GP0_PLL_CNTL6 0x058
31#define HHI_GP0_PLL_STS 0x05C
32#define HHI_PCIE_PLL_CNTL0 0x098
33#define HHI_PCIE_PLL_CNTL1 0x09C
34#define HHI_PCIE_PLL_CNTL2 0x0A0
35#define HHI_PCIE_PLL_CNTL3 0x0A4
36#define HHI_PCIE_PLL_CNTL4 0x0A8
37#define HHI_PCIE_PLL_CNTL5 0x0AC
38#define HHI_PCIE_PLL_STS 0x0B8
39#define HHI_HIFI_PLL_CNTL0 0x0D8
40#define HHI_HIFI_PLL_CNTL1 0x0DC
41#define HHI_HIFI_PLL_CNTL2 0x0E0
42#define HHI_HIFI_PLL_CNTL3 0x0E4
43#define HHI_HIFI_PLL_CNTL4 0x0E8
44#define HHI_HIFI_PLL_CNTL5 0x0EC
45#define HHI_HIFI_PLL_CNTL6 0x0F0
46#define HHI_VIID_CLK_DIV 0x128
47#define HHI_VIID_CLK_CNTL 0x12C
48#define HHI_GCLK_MPEG0 0x140
49#define HHI_GCLK_MPEG1 0x144
50#define HHI_GCLK_MPEG2 0x148
51#define HHI_GCLK_OTHER 0x150
52#define HHI_GCLK_OTHER2 0x154
53#define HHI_VID_CLK_DIV 0x164
54#define HHI_MPEG_CLK_CNTL 0x174
55#define HHI_AUD_CLK_CNTL 0x178
56#define HHI_VID_CLK_CNTL 0x17c
57#define HHI_TS_CLK_CNTL 0x190
58#define HHI_VID_CLK_CNTL2 0x194
59#define HHI_SYS_CPU_CLK_CNTL0 0x19c
60#define HHI_VID_PLL_CLK_DIV 0x1A0
61#define HHI_MALI_CLK_CNTL 0x1b0
62#define HHI_VPU_CLKC_CNTL 0x1b4
63#define HHI_VPU_CLK_CNTL 0x1bC
64#define HHI_HDMI_CLK_CNTL 0x1CC
65#define HHI_VDEC_CLK_CNTL 0x1E0
66#define HHI_VDEC2_CLK_CNTL 0x1E4
67#define HHI_VDEC3_CLK_CNTL 0x1E8
68#define HHI_VDEC4_CLK_CNTL 0x1EC
69#define HHI_HDCP22_CLK_CNTL 0x1F0
70#define HHI_VAPBCLK_CNTL 0x1F4
71#define HHI_VPU_CLKB_CNTL 0x20C
72#define HHI_GEN_CLK_CNTL 0x228
73#define HHI_VDIN_MEAS_CLK_CNTL 0x250
74#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
75#define HHI_NAND_CLK_CNTL 0x25C
76#define HHI_SD_EMMC_CLK_CNTL 0x264
77#define HHI_MPLL_CNTL0 0x278
78#define HHI_MPLL_CNTL1 0x27C
79#define HHI_MPLL_CNTL2 0x280
80#define HHI_MPLL_CNTL3 0x284
81#define HHI_MPLL_CNTL4 0x288
82#define HHI_MPLL_CNTL5 0x28c
83#define HHI_MPLL_CNTL6 0x290
84#define HHI_MPLL_CNTL7 0x294
85#define HHI_MPLL_CNTL8 0x298
86#define HHI_FIX_PLL_CNTL0 0x2A0
87#define HHI_FIX_PLL_CNTL1 0x2A4
88#define HHI_FIX_PLL_CNTL3 0x2AC
89#define HHI_SYS_PLL_CNTL0 0x2f4
90#define HHI_SYS_PLL_CNTL1 0x2f8
91#define HHI_SYS_PLL_CNTL2 0x2fc
92#define HHI_SYS_PLL_CNTL3 0x300
93#define HHI_SYS_PLL_CNTL4 0x304
94#define HHI_SYS_PLL_CNTL5 0x308
95#define HHI_SYS_PLL_CNTL6 0x30c
96#define HHI_HDMI_PLL_CNTL0 0x320
97#define HHI_HDMI_PLL_CNTL1 0x324
98#define HHI_HDMI_PLL_CNTL2 0x328
99#define HHI_HDMI_PLL_CNTL3 0x32c
100#define HHI_HDMI_PLL_CNTL4 0x330
101#define HHI_HDMI_PLL_CNTL5 0x334
102#define HHI_HDMI_PLL_CNTL6 0x338
103#define HHI_SPICC_CLK_CNTL 0x3dc
104
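
To make the "multiply by 4" rule above concrete: HHI_GCLK_MPEG0 is listed here at byte offset 0x140, which corresponds to a datasheet register index of 0x50 (0x50 * 4 = 0x140). A hypothetical helper spelling out the conversion (the 0x50 index is inferred from the byte offset above, not quoted from the datasheet):

/* Hypothetical convenience macro, not part of this patch */
#define HHI_DSHEET_TO_OFFSET(idx)	((idx) << 2)
/* HHI_DSHEET_TO_OFFSET(0x50) == 0x140 == HHI_GCLK_MPEG0 */
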
105/*
106 * CLKID index values
107 *
108 * These indices are entirely contrived and do not map onto the hardware.
109 * It has now been decided to expose everything by default in the DT header:
110 * include/dt-bindings/clock/g12a-clkc.h. Only the clock IDs we don't want
111 * to expose, such as the internal muxes and dividers of composite clocks,
112 * will remain defined here.
113 */
114#define CLKID_MPEG_SEL 8
115#define CLKID_MPEG_DIV 9
116#define CLKID_SD_EMMC_A_CLK0_SEL 63
117#define CLKID_SD_EMMC_A_CLK0_DIV 64
118#define CLKID_SD_EMMC_B_CLK0_SEL 65
119#define CLKID_SD_EMMC_B_CLK0_DIV 66
120#define CLKID_SD_EMMC_C_CLK0_SEL 67
121#define CLKID_SD_EMMC_C_CLK0_DIV 68
122#define CLKID_MPLL0_DIV 69
123#define CLKID_MPLL1_DIV 70
124#define CLKID_MPLL2_DIV 71
125#define CLKID_MPLL3_DIV 72
126#define CLKID_MPLL_PREDIV 73
127#define CLKID_FCLK_DIV2_DIV 75
128#define CLKID_FCLK_DIV3_DIV 76
129#define CLKID_FCLK_DIV4_DIV 77
130#define CLKID_FCLK_DIV5_DIV 78
131#define CLKID_FCLK_DIV7_DIV 79
132#define CLKID_FCLK_DIV2P5_DIV 100
133#define CLKID_FIXED_PLL_DCO 101
134#define CLKID_SYS_PLL_DCO 102
135#define CLKID_GP0_PLL_DCO 103
136#define CLKID_HIFI_PLL_DCO 104
137#define CLKID_VPU_0_DIV 111
138#define CLKID_VPU_1_DIV 114
139#define CLKID_VAPB_0_DIV 118
140#define CLKID_VAPB_1_DIV 121
141#define CLKID_HDMI_PLL_DCO 125
142#define CLKID_HDMI_PLL_OD 126
143#define CLKID_HDMI_PLL_OD2 127
144#define CLKID_VID_PLL_SEL 130
145#define CLKID_VID_PLL_DIV 131
146#define CLKID_VCLK_SEL 132
147#define CLKID_VCLK2_SEL 133
148#define CLKID_VCLK_INPUT 134
149#define CLKID_VCLK2_INPUT 135
150#define CLKID_VCLK_DIV 136
151#define CLKID_VCLK2_DIV 137
152#define CLKID_VCLK_DIV2_EN 140
153#define CLKID_VCLK_DIV4_EN 141
154#define CLKID_VCLK_DIV6_EN 142
155#define CLKID_VCLK_DIV12_EN 143
156#define CLKID_VCLK2_DIV2_EN 144
157#define CLKID_VCLK2_DIV4_EN 145
158#define CLKID_VCLK2_DIV6_EN 146
159#define CLKID_VCLK2_DIV12_EN 147
160#define CLKID_CTS_ENCI_SEL 158
161#define CLKID_CTS_ENCP_SEL 159
162#define CLKID_CTS_VDAC_SEL 160
163#define CLKID_HDMI_TX_SEL 161
164#define CLKID_HDMI_SEL 166
165#define CLKID_HDMI_DIV 167
166#define CLKID_MALI_0_DIV 170
167#define CLKID_MALI_1_DIV 173
168#define CLKID_MPLL_5OM_DIV 176
169
170#define NR_CLKS 178
171
172/* include the CLKIDs that have been made part of the DT binding */
173#include <dt-bindings/clock/g12a-clkc.h>
174
175#endif /* __G12A_H */
diff --git a/drivers/clk/meson/gxbb-aoclk-32k.c b/drivers/clk/meson/gxbb-aoclk-32k.c
deleted file mode 100644
index 680467141a1d..000000000000
--- a/drivers/clk/meson/gxbb-aoclk-32k.c
+++ /dev/null
@@ -1,193 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (c) 2017 BayLibre, SAS.
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 */
6
7#include <linux/clk-provider.h>
8#include <linux/bitfield.h>
9#include <linux/regmap.h>
10#include "gxbb-aoclk.h"
11
12/*
13 * The AO Domain embeds a dual/divider to generate a more precise
14 * 32,768KHz clock for low-power suspend mode and CEC.
15 * ______ ______
16 * | | | |
17 * ______ | Div1 |-| Cnt1 | ______
18 * | | /|______| |______|\ | |
19 * Xtal-->| Gate |---| ______ ______ X-X--| Gate |-->
20 * |______| | \| | | |/ | |______|
21 * | | Div2 |-| Cnt2 | |
22 * | |______| |______| |
23 * |_______________________|
24 *
25 * The dividing can be switched to single or dual, with a counter
26 * for each divider to set when the switching is done.
27 * The entire dividing mechanism can be also bypassed.
28 */
29
30#define CLK_CNTL0_N1_MASK GENMASK(11, 0)
31#define CLK_CNTL0_N2_MASK GENMASK(23, 12)
32#define CLK_CNTL0_DUALDIV_EN BIT(28)
33#define CLK_CNTL0_OUT_GATE_EN BIT(30)
34#define CLK_CNTL0_IN_GATE_EN BIT(31)
35
36#define CLK_CNTL1_M1_MASK GENMASK(11, 0)
37#define CLK_CNTL1_M2_MASK GENMASK(23, 12)
38#define CLK_CNTL1_BYPASS_EN BIT(24)
39#define CLK_CNTL1_SELECT_OSC BIT(27)
40
41#define PWR_CNTL_ALT_32K_SEL GENMASK(13, 10)
42
43struct cec_32k_freq_table {
44 unsigned long parent_rate;
45 unsigned long target_rate;
46 bool dualdiv;
47 unsigned int n1;
48 unsigned int n2;
49 unsigned int m1;
50 unsigned int m2;
51};
52
53static const struct cec_32k_freq_table aoclk_cec_32k_table[] = {
54 [0] = {
55 .parent_rate = 24000000,
56 .target_rate = 32768,
57 .dualdiv = true,
58 .n1 = 733,
59 .n2 = 732,
60 .m1 = 8,
61 .m2 = 11,
62 },
63};
64
65/*
66 * If CLK_CNTL0_DUALDIV_EN == 0
67 * - will use N1 divider only
68 * If CLK_CNTL0_DUALDIV_EN == 1
69 * - hold M1 cycles of N1 divider then changes to N2
70 * - hold M2 cycles of N2 divider then changes to N1
71 * Then we can get more accurate division.
72 */
73static unsigned long aoclk_cec_32k_recalc_rate(struct clk_hw *hw,
74 unsigned long parent_rate)
75{
76 struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
77 unsigned long n1;
78 u32 reg0, reg1;
79
80 regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, &reg0);
81 regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, &reg1);
82
83 if (reg1 & CLK_CNTL1_BYPASS_EN)
84 return parent_rate;
85
86 if (reg0 & CLK_CNTL0_DUALDIV_EN) {
87 unsigned long n2, m1, m2, f1, f2, p1, p2;
88
89 n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
90 n2 = FIELD_GET(CLK_CNTL0_N2_MASK, reg0) + 1;
91
92 m1 = FIELD_GET(CLK_CNTL1_M1_MASK, reg1) + 1;
93 m2 = FIELD_GET(CLK_CNTL1_M2_MASK, reg1) + 1;
94
95 f1 = DIV_ROUND_CLOSEST(parent_rate, n1);
96 f2 = DIV_ROUND_CLOSEST(parent_rate, n2);
97
98 p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2));
99 p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2));
100
101 return DIV_ROUND_UP(100000000, p1 + p2);
102 }
103
104 n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
105
106 return DIV_ROUND_CLOSEST(parent_rate, n1);
107}
108
109static const struct cec_32k_freq_table *find_cec_32k_freq(unsigned long rate,
110 unsigned long prate)
111{
112 int i;
113
114 for (i = 0 ; i < ARRAY_SIZE(aoclk_cec_32k_table) ; ++i)
115 if (aoclk_cec_32k_table[i].parent_rate == prate &&
116 aoclk_cec_32k_table[i].target_rate == rate)
117 return &aoclk_cec_32k_table[i];
118
119 return NULL;
120}
121
122static long aoclk_cec_32k_round_rate(struct clk_hw *hw, unsigned long rate,
123 unsigned long *prate)
124{
125 const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
126 *prate);
127
128 /* If invalid return first one */
129 if (!freq)
130 return aoclk_cec_32k_table[0].target_rate;
131
132 return freq->target_rate;
133}
134
135/*
136 * From the Amlogic init procedure, the IN and OUT gates needs to be handled
137 * in the init procedure to avoid any glitches.
138 */
139
140static int aoclk_cec_32k_set_rate(struct clk_hw *hw, unsigned long rate,
141 unsigned long parent_rate)
142{
143 const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
144 parent_rate);
145 struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
146 u32 reg = 0;
147
148 if (!freq)
149 return -EINVAL;
150
151 /* Disable clock */
152 regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
153 CLK_CNTL0_IN_GATE_EN | CLK_CNTL0_OUT_GATE_EN, 0);
154
155 reg = FIELD_PREP(CLK_CNTL0_N1_MASK, freq->n1 - 1);
156 if (freq->dualdiv)
157 reg |= CLK_CNTL0_DUALDIV_EN |
158 FIELD_PREP(CLK_CNTL0_N2_MASK, freq->n2 - 1);
159
160 regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, reg);
161
162 reg = FIELD_PREP(CLK_CNTL1_M1_MASK, freq->m1 - 1);
163 if (freq->dualdiv)
164 reg |= FIELD_PREP(CLK_CNTL1_M2_MASK, freq->m2 - 1);
165
166 regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, reg);
167
168 /* Enable clock */
169 regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
170 CLK_CNTL0_IN_GATE_EN, CLK_CNTL0_IN_GATE_EN);
171
172 udelay(200);
173
174 regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
175 CLK_CNTL0_OUT_GATE_EN, CLK_CNTL0_OUT_GATE_EN);
176
177 regmap_update_bits(cec_32k->regmap, AO_CRT_CLK_CNTL1,
178 CLK_CNTL1_SELECT_OSC, CLK_CNTL1_SELECT_OSC);
179
180 /* Select 32k from XTAL */
181 regmap_update_bits(cec_32k->regmap,
182 AO_RTI_PWR_CNTL_REG0,
183 PWR_CNTL_ALT_32K_SEL,
184 FIELD_PREP(PWR_CNTL_ALT_32K_SEL, 4));
185
186 return 0;
187}
188
189const struct clk_ops meson_aoclk_cec_32k_ops = {
190 .recalc_rate = aoclk_cec_32k_recalc_rate,
191 .round_rate = aoclk_cec_32k_round_rate,
192 .set_rate = aoclk_cec_32k_set_rate,
193};
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 42ed61d3c3fb..449f6ac189d8 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -5,10 +5,23 @@
5 */ 5 */
6#include <linux/platform_device.h> 6#include <linux/platform_device.h>
7#include <linux/mfd/syscon.h> 7#include <linux/mfd/syscon.h>
8#include "clk-regmap.h"
9#include "meson-aoclk.h" 8#include "meson-aoclk.h"
10#include "gxbb-aoclk.h" 9#include "gxbb-aoclk.h"
11 10
11#include "clk-regmap.h"
12#include "clk-dualdiv.h"
13
14#define IN_PREFIX "ao-in-"
15
16/* AO Configuration Clock registers offsets */
17#define AO_RTI_PWR_CNTL_REG1 0x0c
18#define AO_RTI_PWR_CNTL_REG0 0x10
19#define AO_RTI_GEN_CNTL_REG0 0x40
20#define AO_OSCIN_CNTL 0x58
21#define AO_CRT_CLK_CNTL1 0x68
22#define AO_RTC_ALT_CLK_CNTL0 0x94
23#define AO_RTC_ALT_CLK_CNTL1 0x98
24
12#define GXBB_AO_GATE(_name, _bit) \ 25#define GXBB_AO_GATE(_name, _bit) \
13static struct clk_regmap _name##_ao = { \ 26static struct clk_regmap _name##_ao = { \
14 .data = &(struct clk_regmap_gate_data) { \ 27 .data = &(struct clk_regmap_gate_data) { \
@@ -18,7 +31,7 @@ static struct clk_regmap _name##_ao = { \
18 .hw.init = &(struct clk_init_data) { \ 31 .hw.init = &(struct clk_init_data) { \
19 .name = #_name "_ao", \ 32 .name = #_name "_ao", \
20 .ops = &clk_regmap_gate_ops, \ 33 .ops = &clk_regmap_gate_ops, \
21 .parent_names = (const char *[]){ "clk81" }, \ 34 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
22 .num_parents = 1, \ 35 .num_parents = 1, \
23 .flags = CLK_IGNORE_UNUSED, \ 36 .flags = CLK_IGNORE_UNUSED, \
24 }, \ 37 }, \
@@ -31,13 +44,174 @@ GXBB_AO_GATE(uart1, 3);
31GXBB_AO_GATE(uart2, 5); 44GXBB_AO_GATE(uart2, 5);
32GXBB_AO_GATE(ir_blaster, 6); 45GXBB_AO_GATE(ir_blaster, 6);
33 46
34static struct aoclk_cec_32k cec_32k_ao = { 47static struct clk_regmap ao_cts_oscin = {
35 .hw.init = &(struct clk_init_data) { 48 .data = &(struct clk_regmap_gate_data){
36 .name = "cec_32k_ao", 49 .offset = AO_RTI_PWR_CNTL_REG0,
37 .ops = &meson_aoclk_cec_32k_ops, 50 .bit_idx = 6,
38 .parent_names = (const char *[]){ "xtal" }, 51 },
52 .hw.init = &(struct clk_init_data){
53 .name = "ao_cts_oscin",
54 .ops = &clk_regmap_gate_ro_ops,
55 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
56 .num_parents = 1,
57 },
58};
59
60static struct clk_regmap ao_32k_pre = {
61 .data = &(struct clk_regmap_gate_data){
62 .offset = AO_RTC_ALT_CLK_CNTL0,
63 .bit_idx = 31,
64 },
65 .hw.init = &(struct clk_init_data){
66 .name = "ao_32k_pre",
67 .ops = &clk_regmap_gate_ops,
68 .parent_names = (const char *[]){ "ao_cts_oscin" },
69 .num_parents = 1,
70 },
71};
72
73static const struct meson_clk_dualdiv_param gxbb_32k_div_table[] = {
74 {
75 .dual = 1,
76 .n1 = 733,
77 .m1 = 8,
78 .n2 = 732,
79 .m2 = 11,
80 }, {}
81};
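
The single entry above is what turns the 24 MHz crystal into the 32768 Hz RTC reference. Assuming the dual divider time-weights the two ratios the same way the recalc code in the removed gxbb-aoclk-32k.c above did, the effective divisor is (n1*m1 + n2*m2) / (m1 + m2) = (733*8 + 732*11) / 19 = 13916 / 19 ≈ 732.42, and 24000000 / 732.42 ≈ 32768 Hz.
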
82
83static struct clk_regmap ao_32k_div = {
84 .data = &(struct meson_clk_dualdiv_data){
85 .n1 = {
86 .reg_off = AO_RTC_ALT_CLK_CNTL0,
87 .shift = 0,
88 .width = 12,
89 },
90 .n2 = {
91 .reg_off = AO_RTC_ALT_CLK_CNTL0,
92 .shift = 12,
93 .width = 12,
94 },
95 .m1 = {
96 .reg_off = AO_RTC_ALT_CLK_CNTL1,
97 .shift = 0,
98 .width = 12,
99 },
100 .m2 = {
101 .reg_off = AO_RTC_ALT_CLK_CNTL1,
102 .shift = 12,
103 .width = 12,
104 },
105 .dual = {
106 .reg_off = AO_RTC_ALT_CLK_CNTL0,
107 .shift = 28,
108 .width = 1,
109 },
110 .table = gxbb_32k_div_table,
111 },
112 .hw.init = &(struct clk_init_data){
113 .name = "ao_32k_div",
114 .ops = &meson_clk_dualdiv_ops,
115 .parent_names = (const char *[]){ "ao_32k_pre" },
116 .num_parents = 1,
117 },
118};
119
120static struct clk_regmap ao_32k_sel = {
121 .data = &(struct clk_regmap_mux_data) {
122 .offset = AO_RTC_ALT_CLK_CNTL1,
123 .mask = 0x1,
124 .shift = 24,
125 .flags = CLK_MUX_ROUND_CLOSEST,
126 },
127 .hw.init = &(struct clk_init_data){
128 .name = "ao_32k_sel",
129 .ops = &clk_regmap_mux_ops,
130 .parent_names = (const char *[]){ "ao_32k_div",
131 "ao_32k_pre" },
132 .num_parents = 2,
133 .flags = CLK_SET_RATE_PARENT,
134 },
135};
136
137static struct clk_regmap ao_32k = {
138 .data = &(struct clk_regmap_gate_data){
139 .offset = AO_RTC_ALT_CLK_CNTL0,
140 .bit_idx = 30,
141 },
142 .hw.init = &(struct clk_init_data){
143 .name = "ao_32k",
144 .ops = &clk_regmap_gate_ops,
145 .parent_names = (const char *[]){ "ao_32k_sel" },
39 .num_parents = 1, 146 .num_parents = 1,
40 .flags = CLK_IGNORE_UNUSED, 147 .flags = CLK_SET_RATE_PARENT,
148 },
149};
150
151static struct clk_regmap ao_cts_rtc_oscin = {
152 .data = &(struct clk_regmap_mux_data) {
153 .offset = AO_RTI_PWR_CNTL_REG0,
154 .mask = 0x7,
155 .shift = 10,
156 .table = (u32[]){ 1, 2, 3, 4 },
157 .flags = CLK_MUX_ROUND_CLOSEST,
158 },
159 .hw.init = &(struct clk_init_data){
160 .name = "ao_cts_rtc_oscin",
161 .ops = &clk_regmap_mux_ops,
162 .parent_names = (const char *[]){ IN_PREFIX "ext-32k-0",
163 IN_PREFIX "ext-32k-1",
164 IN_PREFIX "ext-32k-2",
165 "ao_32k" },
166 .num_parents = 4,
167 .flags = CLK_SET_RATE_PARENT,
168 },
169};
170
171static struct clk_regmap ao_clk81 = {
172 .data = &(struct clk_regmap_mux_data) {
173 .offset = AO_RTI_PWR_CNTL_REG0,
174 .mask = 0x1,
175 .shift = 0,
176 .flags = CLK_MUX_ROUND_CLOSEST,
177 },
178 .hw.init = &(struct clk_init_data){
179 .name = "ao_clk81",
180 .ops = &clk_regmap_mux_ro_ops,
181 .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
182 "ao_cts_rtc_oscin" },
183 .num_parents = 2,
184 .flags = CLK_SET_RATE_PARENT,
185 },
186};
187
188static struct clk_regmap ao_cts_cec = {
189 .data = &(struct clk_regmap_mux_data) {
190 .offset = AO_CRT_CLK_CNTL1,
191 .mask = 0x1,
192 .shift = 27,
193 .flags = CLK_MUX_ROUND_CLOSEST,
194 },
195 .hw.init = &(struct clk_init_data){
196 .name = "ao_cts_cec",
197 .ops = &clk_regmap_mux_ops,
198 /*
199 * FIXME: The 'fixme' parent obviously does not exist.
200 *
201 * ATM, CCF won't call get_parent() if num_parents is 1. It
202 * does not allow NULL as a parent name either.
203 *
204 * On this particular mux, we only know the input #1 parent
205 * but, on boot, unknown input #0 is set, so it is critical
206 * to call .get_parent() on it
207 *
208 * Until CCF gets fixed, adding this fake parent that won't
209 * ever be registered should work around the problem
210 */
211 .parent_names = (const char *[]){ "fixme",
212 "ao_cts_rtc_oscin" },
213 .num_parents = 2,
214 .flags = CLK_SET_RATE_PARENT,
41 }, 215 },
42}; 216};
43 217
@@ -50,13 +224,21 @@ static const unsigned int gxbb_aoclk_reset[] = {
50 [RESET_AO_IR_BLASTER] = 23, 224 [RESET_AO_IR_BLASTER] = 23,
51}; 225};
52 226
53static struct clk_regmap *gxbb_aoclk_gate[] = { 227static struct clk_regmap *gxbb_aoclk[] = {
54 [CLKID_AO_REMOTE] = &remote_ao, 228 &remote_ao,
55 [CLKID_AO_I2C_MASTER] = &i2c_master_ao, 229 &i2c_master_ao,
56 [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao, 230 &i2c_slave_ao,
57 [CLKID_AO_UART1] = &uart1_ao, 231 &uart1_ao,
58 [CLKID_AO_UART2] = &uart2_ao, 232 &uart2_ao,
59 [CLKID_AO_IR_BLASTER] = &ir_blaster_ao, 233 &ir_blaster_ao,
234 &ao_cts_oscin,
235 &ao_32k_pre,
236 &ao_32k_div,
237 &ao_32k_sel,
238 &ao_32k,
239 &ao_cts_rtc_oscin,
240 &ao_clk81,
241 &ao_cts_cec,
60}; 242};
61 243
62static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = { 244static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
@@ -67,52 +249,38 @@ static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
67 [CLKID_AO_UART1] = &uart1_ao.hw, 249 [CLKID_AO_UART1] = &uart1_ao.hw,
68 [CLKID_AO_UART2] = &uart2_ao.hw, 250 [CLKID_AO_UART2] = &uart2_ao.hw,
69 [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw, 251 [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
70 [CLKID_AO_CEC_32K] = &cec_32k_ao.hw, 252 [CLKID_AO_CEC_32K] = &ao_cts_cec.hw,
253 [CLKID_AO_CTS_OSCIN] = &ao_cts_oscin.hw,
254 [CLKID_AO_32K_PRE] = &ao_32k_pre.hw,
255 [CLKID_AO_32K_DIV] = &ao_32k_div.hw,
256 [CLKID_AO_32K_SEL] = &ao_32k_sel.hw,
257 [CLKID_AO_32K] = &ao_32k.hw,
258 [CLKID_AO_CTS_RTC_OSCIN] = &ao_cts_rtc_oscin.hw,
259 [CLKID_AO_CLK81] = &ao_clk81.hw,
71 }, 260 },
72 .num = NR_CLKS, 261 .num = NR_CLKS,
73}; 262};
74 263
75static int gxbb_register_cec_ao_32k(struct platform_device *pdev) 264static const struct meson_aoclk_input gxbb_aoclk_inputs[] = {
76{ 265 { .name = "xtal", .required = true, },
77 struct device *dev = &pdev->dev; 266 { .name = "mpeg-clk", .required = true, },
78 struct regmap *regmap; 267 { .name = "ext-32k-0", .required = false, },
79 int ret; 268 { .name = "ext-32k-1", .required = false, },
80 269 { .name = "ext-32k-2", .required = false, },
81 regmap = syscon_node_to_regmap(of_get_parent(dev->of_node)); 270};
82 if (IS_ERR(regmap)) {
83 dev_err(dev, "failed to get regmap\n");
84 return PTR_ERR(regmap);
85 }
86
87 /* Specific clocks */
88 cec_32k_ao.regmap = regmap;
89 ret = devm_clk_hw_register(dev, &cec_32k_ao.hw);
90 if (ret) {
91 dev_err(&pdev->dev, "clk cec_32k_ao register failed.\n");
92 return ret;
93 }
94
95 return 0;
96}
97 271
98static const struct meson_aoclk_data gxbb_aoclkc_data = { 272static const struct meson_aoclk_data gxbb_aoclkc_data = {
99 .reset_reg = AO_RTI_GEN_CNTL_REG0, 273 .reset_reg = AO_RTI_GEN_CNTL_REG0,
100 .num_reset = ARRAY_SIZE(gxbb_aoclk_reset), 274 .num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
101 .reset = gxbb_aoclk_reset, 275 .reset = gxbb_aoclk_reset,
102 .num_clks = ARRAY_SIZE(gxbb_aoclk_gate), 276 .num_clks = ARRAY_SIZE(gxbb_aoclk),
103 .clks = gxbb_aoclk_gate, 277 .clks = gxbb_aoclk,
104 .hw_data = &gxbb_aoclk_onecell_data, 278 .hw_data = &gxbb_aoclk_onecell_data,
279 .inputs = gxbb_aoclk_inputs,
280 .num_inputs = ARRAY_SIZE(gxbb_aoclk_inputs),
281 .input_prefix = IN_PREFIX,
105}; 282};
106 283
107static int gxbb_aoclkc_probe(struct platform_device *pdev)
108{
109 int ret = gxbb_register_cec_ao_32k(pdev);
110 if (ret)
111 return ret;
112
113 return meson_aoclkc_probe(pdev);
114}
115
116static const struct of_device_id gxbb_aoclkc_match_table[] = { 284static const struct of_device_id gxbb_aoclkc_match_table[] = {
117 { 285 {
118 .compatible = "amlogic,meson-gx-aoclkc", 286 .compatible = "amlogic,meson-gx-aoclkc",
@@ -122,7 +290,7 @@ static const struct of_device_id gxbb_aoclkc_match_table[] = {
122}; 290};
123 291
124static struct platform_driver gxbb_aoclkc_driver = { 292static struct platform_driver gxbb_aoclkc_driver = {
125 .probe = gxbb_aoclkc_probe, 293 .probe = meson_aoclkc_probe,
126 .driver = { 294 .driver = {
127 .name = "gxbb-aoclkc", 295 .name = "gxbb-aoclkc",
128 .of_match_table = gxbb_aoclkc_match_table, 296 .of_match_table = gxbb_aoclkc_match_table,
diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h
index c514493d989a..1db16f9b37d4 100644
--- a/drivers/clk/meson/gxbb-aoclk.h
+++ b/drivers/clk/meson/gxbb-aoclk.h
@@ -7,25 +7,7 @@
7#ifndef __GXBB_AOCLKC_H 7#ifndef __GXBB_AOCLKC_H
8#define __GXBB_AOCLKC_H 8#define __GXBB_AOCLKC_H
9 9
10#define NR_CLKS 7 10#define NR_CLKS 14
11
12/* AO Configuration Clock registers offsets */
13#define AO_RTI_PWR_CNTL_REG1 0x0c
14#define AO_RTI_PWR_CNTL_REG0 0x10
15#define AO_RTI_GEN_CNTL_REG0 0x40
16#define AO_OSCIN_CNTL 0x58
17#define AO_CRT_CLK_CNTL1 0x68
18#define AO_RTC_ALT_CLK_CNTL0 0x94
19#define AO_RTC_ALT_CLK_CNTL1 0x98
20
21struct aoclk_cec_32k {
22 struct clk_hw hw;
23 struct regmap *regmap;
24};
25
26#define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw)
27
28extern const struct clk_ops meson_aoclk_cec_32k_ops;
29 11
30#include <dt-bindings/clock/gxbb-aoclkc.h> 12#include <dt-bindings/clock/gxbb-aoclkc.h>
31#include <dt-bindings/reset/gxbb-aoclkc.h> 13#include <dt-bindings/reset/gxbb-aoclkc.h>
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 65f2599e5243..04df2e208ed6 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -4,17 +4,20 @@
4 * Michael Turquette <mturquette@baylibre.com> 4 * Michael Turquette <mturquette@baylibre.com>
5 */ 5 */
6 6
7#include <linux/clk.h>
8#include <linux/clk-provider.h> 7#include <linux/clk-provider.h>
9#include <linux/init.h> 8#include <linux/init.h>
10#include <linux/of_device.h> 9#include <linux/of_device.h>
11#include <linux/mfd/syscon.h>
12#include <linux/platform_device.h> 10#include <linux/platform_device.h>
13#include <linux/regmap.h>
14 11
15#include "clkc.h"
16#include "gxbb.h" 12#include "gxbb.h"
13#include "clk-input.h"
17#include "clk-regmap.h" 14#include "clk-regmap.h"
15#include "clk-pll.h"
16#include "clk-mpll.h"
17#include "meson-eeclk.h"
18#include "vid-pll-div.h"
19
20#define IN_PREFIX "ee-in-"
18 21
19static DEFINE_SPINLOCK(meson_clk_lock); 22static DEFINE_SPINLOCK(meson_clk_lock);
20 23
@@ -118,7 +121,7 @@ static struct clk_regmap gxbb_fixed_pll_dco = {
118 .hw.init = &(struct clk_init_data){ 121 .hw.init = &(struct clk_init_data){
119 .name = "fixed_pll_dco", 122 .name = "fixed_pll_dco",
120 .ops = &meson_clk_pll_ro_ops, 123 .ops = &meson_clk_pll_ro_ops,
121 .parent_names = (const char *[]){ "xtal" }, 124 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
122 .num_parents = 1, 125 .num_parents = 1,
123 }, 126 },
124}; 127};
@@ -148,7 +151,7 @@ static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = {
148 .hw.init = &(struct clk_init_data){ 151 .hw.init = &(struct clk_init_data){
149 .name = "hdmi_pll_pre_mult", 152 .name = "hdmi_pll_pre_mult",
150 .ops = &clk_fixed_factor_ops, 153 .ops = &clk_fixed_factor_ops,
151 .parent_names = (const char *[]){ "xtal" }, 154 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
152 .num_parents = 1, 155 .num_parents = 1,
153 }, 156 },
154}; 157};
@@ -241,7 +244,7 @@ static struct clk_regmap gxl_hdmi_pll_dco = {
241 .hw.init = &(struct clk_init_data){ 244 .hw.init = &(struct clk_init_data){
242 .name = "hdmi_pll_dco", 245 .name = "hdmi_pll_dco",
243 .ops = &meson_clk_pll_ro_ops, 246 .ops = &meson_clk_pll_ro_ops,
244 .parent_names = (const char *[]){ "xtal" }, 247 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
245 .num_parents = 1, 248 .num_parents = 1,
246 /* 249 /*
247 * Display directly handle hdmi pll registers ATM, we need 250 * Display directly handle hdmi pll registers ATM, we need
@@ -378,7 +381,7 @@ static struct clk_regmap gxbb_sys_pll_dco = {
378 .hw.init = &(struct clk_init_data){ 381 .hw.init = &(struct clk_init_data){
379 .name = "sys_pll_dco", 382 .name = "sys_pll_dco",
380 .ops = &meson_clk_pll_ro_ops, 383 .ops = &meson_clk_pll_ro_ops,
381 .parent_names = (const char *[]){ "xtal" }, 384 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
382 .num_parents = 1, 385 .num_parents = 1,
383 }, 386 },
384}; 387};
@@ -439,7 +442,7 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
439 .hw.init = &(struct clk_init_data){ 442 .hw.init = &(struct clk_init_data){
440 .name = "gp0_pll_dco", 443 .name = "gp0_pll_dco",
441 .ops = &meson_clk_pll_ops, 444 .ops = &meson_clk_pll_ops,
442 .parent_names = (const char *[]){ "xtal" }, 445 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
443 .num_parents = 1, 446 .num_parents = 1,
444 }, 447 },
445}; 448};
@@ -491,7 +494,7 @@ static struct clk_regmap gxl_gp0_pll_dco = {
491 .hw.init = &(struct clk_init_data){ 494 .hw.init = &(struct clk_init_data){
492 .name = "gp0_pll_dco", 495 .name = "gp0_pll_dco",
493 .ops = &meson_clk_pll_ops, 496 .ops = &meson_clk_pll_ops,
494 .parent_names = (const char *[]){ "xtal" }, 497 .parent_names = (const char *[]){ IN_PREFIX "xtal" },
495 .num_parents = 1, 498 .num_parents = 1,
496 }, 499 },
497}; 500};
@@ -789,7 +792,7 @@ static struct clk_regmap gxbb_mpll2 = {
789 792
790static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 }; 793static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
791static const char * const clk81_parent_names[] = { 794static const char * const clk81_parent_names[] = {
792 "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4", 795 IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
793 "fclk_div3", "fclk_div5" 796 "fclk_div3", "fclk_div5"
794}; 797};
795 798
@@ -852,7 +855,7 @@ static struct clk_regmap gxbb_sar_adc_clk_sel = {
852 .name = "sar_adc_clk_sel", 855 .name = "sar_adc_clk_sel",
853 .ops = &clk_regmap_mux_ops, 856 .ops = &clk_regmap_mux_ops,
854 /* NOTE: The datasheet doesn't list the parents for bit 10 */ 857 /* NOTE: The datasheet doesn't list the parents for bit 10 */
855 .parent_names = (const char *[]){ "xtal", "clk81", }, 858 .parent_names = (const char *[]){ IN_PREFIX "xtal", "clk81", },
856 .num_parents = 2, 859 .num_parents = 2,
857 }, 860 },
858}; 861};
@@ -891,7 +894,7 @@ static struct clk_regmap gxbb_sar_adc_clk = {
891 */ 894 */
892 895
893static const char * const gxbb_mali_0_1_parent_names[] = { 896static const char * const gxbb_mali_0_1_parent_names[] = {
894 "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7", 897 IN_PREFIX "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
895 "fclk_div4", "fclk_div3", "fclk_div5" 898 "fclk_div4", "fclk_div3", "fclk_div5"
896}; 899};
897 900
@@ -1153,7 +1156,7 @@ static struct clk_regmap gxbb_32k_clk = {
1153}; 1156};
1154 1157
1155static const char * const gxbb_32k_clk_parent_names[] = { 1158static const char * const gxbb_32k_clk_parent_names[] = {
1156 "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5" 1159 IN_PREFIX "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
1157}; 1160};
1158 1161
1159static struct clk_regmap gxbb_32k_clk_sel = { 1162static struct clk_regmap gxbb_32k_clk_sel = {
@@ -1172,7 +1175,7 @@ static struct clk_regmap gxbb_32k_clk_sel = {
1172}; 1175};
1173 1176
1174static const char * const gxbb_sd_emmc_clk0_parent_names[] = { 1177static const char * const gxbb_sd_emmc_clk0_parent_names[] = {
1175 "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", 1178 IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
1176 1179
1177 /* 1180 /*
1178 * Following these parent clocks, we should also have had mpll2, mpll3 1181 * Following these parent clocks, we should also have had mpll2, mpll3
@@ -2138,7 +2141,7 @@ static struct clk_regmap gxbb_hdmi_tx = {
2138/* HDMI Clocks */ 2141/* HDMI Clocks */
2139 2142
2140static const char * const gxbb_hdmi_parent_names[] = { 2143static const char * const gxbb_hdmi_parent_names[] = {
2141 "xtal", "fclk_div4", "fclk_div3", "fclk_div5" 2144 IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
2142}; 2145};
2143 2146
2144static struct clk_regmap gxbb_hdmi_sel = { 2147static struct clk_regmap gxbb_hdmi_sel = {
@@ -2285,7 +2288,7 @@ static struct clk_regmap gxbb_vdec_hevc = {
2285static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8, 2288static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
2286 9, 10, 11, 13, 14, }; 2289 9, 10, 11, 13, 14, };
2287static const char * const gen_clk_parent_names[] = { 2290static const char * const gen_clk_parent_names[] = {
2288 "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2", 2291 IN_PREFIX "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
2289 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll", 2292 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
2290}; 2293};
2291 2294
@@ -2854,6 +2857,192 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
2854}; 2857};
2855 2858
2856static struct clk_regmap *const gxbb_clk_regmaps[] = { 2859static struct clk_regmap *const gxbb_clk_regmaps[] = {
2860 &gxbb_clk81,
2861 &gxbb_ddr,
2862 &gxbb_dos,
2863 &gxbb_isa,
2864 &gxbb_pl301,
2865 &gxbb_periphs,
2866 &gxbb_spicc,
2867 &gxbb_i2c,
2868 &gxbb_sar_adc,
2869 &gxbb_smart_card,
2870 &gxbb_rng0,
2871 &gxbb_uart0,
2872 &gxbb_sdhc,
2873 &gxbb_stream,
2874 &gxbb_async_fifo,
2875 &gxbb_sdio,
2876 &gxbb_abuf,
2877 &gxbb_hiu_iface,
2878 &gxbb_assist_misc,
2879 &gxbb_spi,
2880 &gxbb_i2s_spdif,
2881 &gxbb_eth,
2882 &gxbb_demux,
2883 &gxbb_aiu_glue,
2884 &gxbb_iec958,
2885 &gxbb_i2s_out,
2886 &gxbb_amclk,
2887 &gxbb_aififo2,
2888 &gxbb_mixer,
2889 &gxbb_mixer_iface,
2890 &gxbb_adc,
2891 &gxbb_blkmv,
2892 &gxbb_aiu,
2893 &gxbb_uart1,
2894 &gxbb_g2d,
2895 &gxbb_usb0,
2896 &gxbb_usb1,
2897 &gxbb_reset,
2898 &gxbb_nand,
2899 &gxbb_dos_parser,
2900 &gxbb_usb,
2901 &gxbb_vdin1,
2902 &gxbb_ahb_arb0,
2903 &gxbb_efuse,
2904 &gxbb_boot_rom,
2905 &gxbb_ahb_data_bus,
2906 &gxbb_ahb_ctrl_bus,
2907 &gxbb_hdmi_intr_sync,
2908 &gxbb_hdmi_pclk,
2909 &gxbb_usb1_ddr_bridge,
2910 &gxbb_usb0_ddr_bridge,
2911 &gxbb_mmc_pclk,
2912 &gxbb_dvin,
2913 &gxbb_uart2,
2914 &gxbb_sana,
2915 &gxbb_vpu_intr,
2916 &gxbb_sec_ahb_ahb3_bridge,
2917 &gxbb_clk81_a53,
2918 &gxbb_vclk2_venci0,
2919 &gxbb_vclk2_venci1,
2920 &gxbb_vclk2_vencp0,
2921 &gxbb_vclk2_vencp1,
2922 &gxbb_gclk_venci_int0,
2923 &gxbb_gclk_vencp_int,
2924 &gxbb_dac_clk,
2925 &gxbb_aoclk_gate,
2926 &gxbb_iec958_gate,
2927 &gxbb_enc480p,
2928 &gxbb_rng1,
2929 &gxbb_gclk_venci_int1,
2930 &gxbb_vclk2_venclmcc,
2931 &gxbb_vclk2_vencl,
2932 &gxbb_vclk_other,
2933 &gxbb_edp,
2934 &gxbb_ao_media_cpu,
2935 &gxbb_ao_ahb_sram,
2936 &gxbb_ao_ahb_bus,
2937 &gxbb_ao_iface,
2938 &gxbb_ao_i2c,
2939 &gxbb_emmc_a,
2940 &gxbb_emmc_b,
2941 &gxbb_emmc_c,
2942 &gxbb_sar_adc_clk,
2943 &gxbb_mali_0,
2944 &gxbb_mali_1,
2945 &gxbb_cts_amclk,
2946 &gxbb_cts_mclk_i958,
2947 &gxbb_32k_clk,
2948 &gxbb_sd_emmc_a_clk0,
2949 &gxbb_sd_emmc_b_clk0,
2950 &gxbb_sd_emmc_c_clk0,
2951 &gxbb_vpu_0,
2952 &gxbb_vpu_1,
2953 &gxbb_vapb_0,
2954 &gxbb_vapb_1,
2955 &gxbb_vapb,
2956 &gxbb_mpeg_clk_div,
2957 &gxbb_sar_adc_clk_div,
2958 &gxbb_mali_0_div,
2959 &gxbb_mali_1_div,
2960 &gxbb_cts_mclk_i958_div,
2961 &gxbb_32k_clk_div,
2962 &gxbb_sd_emmc_a_clk0_div,
2963 &gxbb_sd_emmc_b_clk0_div,
2964 &gxbb_sd_emmc_c_clk0_div,
2965 &gxbb_vpu_0_div,
2966 &gxbb_vpu_1_div,
2967 &gxbb_vapb_0_div,
2968 &gxbb_vapb_1_div,
2969 &gxbb_mpeg_clk_sel,
2970 &gxbb_sar_adc_clk_sel,
2971 &gxbb_mali_0_sel,
2972 &gxbb_mali_1_sel,
2973 &gxbb_mali,
2974 &gxbb_cts_amclk_sel,
2975 &gxbb_cts_mclk_i958_sel,
2976 &gxbb_cts_i958,
2977 &gxbb_32k_clk_sel,
2978 &gxbb_sd_emmc_a_clk0_sel,
2979 &gxbb_sd_emmc_b_clk0_sel,
2980 &gxbb_sd_emmc_c_clk0_sel,
2981 &gxbb_vpu_0_sel,
2982 &gxbb_vpu_1_sel,
2983 &gxbb_vpu,
2984 &gxbb_vapb_0_sel,
2985 &gxbb_vapb_1_sel,
2986 &gxbb_vapb_sel,
2987 &gxbb_mpll0,
2988 &gxbb_mpll1,
2989 &gxbb_mpll2,
2990 &gxbb_mpll0_div,
2991 &gxbb_mpll1_div,
2992 &gxbb_mpll2_div,
2993 &gxbb_cts_amclk_div,
2994 &gxbb_fixed_pll,
2995 &gxbb_sys_pll,
2996 &gxbb_mpll_prediv,
2997 &gxbb_fclk_div2,
2998 &gxbb_fclk_div3,
2999 &gxbb_fclk_div4,
3000 &gxbb_fclk_div5,
3001 &gxbb_fclk_div7,
3002 &gxbb_vdec_1_sel,
3003 &gxbb_vdec_1_div,
3004 &gxbb_vdec_1,
3005 &gxbb_vdec_hevc_sel,
3006 &gxbb_vdec_hevc_div,
3007 &gxbb_vdec_hevc,
3008 &gxbb_gen_clk_sel,
3009 &gxbb_gen_clk_div,
3010 &gxbb_gen_clk,
3011 &gxbb_fixed_pll_dco,
3012 &gxbb_sys_pll_dco,
3013 &gxbb_gp0_pll,
3014 &gxbb_vid_pll,
3015 &gxbb_vid_pll_sel,
3016 &gxbb_vid_pll_div,
3017 &gxbb_vclk,
3018 &gxbb_vclk_sel,
3019 &gxbb_vclk_div,
3020 &gxbb_vclk_input,
3021 &gxbb_vclk_div1,
3022 &gxbb_vclk_div2_en,
3023 &gxbb_vclk_div4_en,
3024 &gxbb_vclk_div6_en,
3025 &gxbb_vclk_div12_en,
3026 &gxbb_vclk2,
3027 &gxbb_vclk2_sel,
3028 &gxbb_vclk2_div,
3029 &gxbb_vclk2_input,
3030 &gxbb_vclk2_div1,
3031 &gxbb_vclk2_div2_en,
3032 &gxbb_vclk2_div4_en,
3033 &gxbb_vclk2_div6_en,
3034 &gxbb_vclk2_div12_en,
3035 &gxbb_cts_enci,
3036 &gxbb_cts_enci_sel,
3037 &gxbb_cts_encp,
3038 &gxbb_cts_encp_sel,
3039 &gxbb_cts_vdac,
3040 &gxbb_cts_vdac_sel,
3041 &gxbb_hdmi_tx,
3042 &gxbb_hdmi_tx_sel,
3043 &gxbb_hdmi_sel,
3044 &gxbb_hdmi_div,
3045 &gxbb_hdmi,
2857 &gxbb_gp0_pll_dco, 3046 &gxbb_gp0_pll_dco,
2858 &gxbb_hdmi_pll, 3047 &gxbb_hdmi_pll,
2859 &gxbb_hdmi_pll_od, 3048 &gxbb_hdmi_pll_od,
@@ -2862,14 +3051,6 @@ static struct clk_regmap *const gxbb_clk_regmaps[] = {
2862}; 3051};
2863 3052
2864static struct clk_regmap *const gxl_clk_regmaps[] = { 3053static struct clk_regmap *const gxl_clk_regmaps[] = {
2865 &gxl_gp0_pll_dco,
2866 &gxl_hdmi_pll,
2867 &gxl_hdmi_pll_od,
2868 &gxl_hdmi_pll_od2,
2869 &gxl_hdmi_pll_dco,
2870};
2871
2872static struct clk_regmap *const gx_clk_regmaps[] = {
2873 &gxbb_clk81, 3054 &gxbb_clk81,
2874 &gxbb_ddr, 3055 &gxbb_ddr,
2875 &gxbb_dos, 3056 &gxbb_dos,
@@ -3056,23 +3237,22 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
3056 &gxbb_hdmi_sel, 3237 &gxbb_hdmi_sel,
3057 &gxbb_hdmi_div, 3238 &gxbb_hdmi_div,
3058 &gxbb_hdmi, 3239 &gxbb_hdmi,
3240 &gxl_gp0_pll_dco,
3241 &gxl_hdmi_pll,
3242 &gxl_hdmi_pll_od,
3243 &gxl_hdmi_pll_od2,
3244 &gxl_hdmi_pll_dco,
3059}; 3245};
3060 3246
3061struct clkc_data { 3247static const struct meson_eeclkc_data gxbb_clkc_data = {
3062 struct clk_regmap *const *regmap_clks;
3063 unsigned int regmap_clks_count;
3064 struct clk_hw_onecell_data *hw_onecell_data;
3065};
3066
3067static const struct clkc_data gxbb_clkc_data = {
3068 .regmap_clks = gxbb_clk_regmaps, 3248 .regmap_clks = gxbb_clk_regmaps,
3069 .regmap_clks_count = ARRAY_SIZE(gxbb_clk_regmaps), 3249 .regmap_clk_num = ARRAY_SIZE(gxbb_clk_regmaps),
3070 .hw_onecell_data = &gxbb_hw_onecell_data, 3250 .hw_onecell_data = &gxbb_hw_onecell_data,
3071}; 3251};
3072 3252
3073static const struct clkc_data gxl_clkc_data = { 3253static const struct meson_eeclkc_data gxl_clkc_data = {
3074 .regmap_clks = gxl_clk_regmaps, 3254 .regmap_clks = gxl_clk_regmaps,
3075 .regmap_clks_count = ARRAY_SIZE(gxl_clk_regmaps), 3255 .regmap_clk_num = ARRAY_SIZE(gxl_clk_regmaps),
3076 .hw_onecell_data = &gxl_hw_onecell_data, 3256 .hw_onecell_data = &gxl_hw_onecell_data,
3077}; 3257};
3078 3258
@@ -3082,52 +3262,8 @@ static const struct of_device_id clkc_match_table[] = {
3082 {}, 3262 {},
3083}; 3263};
3084 3264
3085static int gxbb_clkc_probe(struct platform_device *pdev)
3086{
3087 const struct clkc_data *clkc_data;
3088 struct regmap *map;
3089 int ret, i;
3090 struct device *dev = &pdev->dev;
3091
3092 clkc_data = of_device_get_match_data(dev);
3093 if (!clkc_data)
3094 return -EINVAL;
3095
3096 /* Get the hhi system controller node if available */
3097 map = syscon_node_to_regmap(of_get_parent(dev->of_node));
3098 if (IS_ERR(map)) {
3099 dev_err(dev, "failed to get HHI regmap\n");
3100 return PTR_ERR(map);
3101 }
3102
3103 /* Populate regmap for the common regmap backed clocks */
3104 for (i = 0; i < ARRAY_SIZE(gx_clk_regmaps); i++)
3105 gx_clk_regmaps[i]->map = map;
3106
3107 /* Populate regmap for soc specific clocks */
3108 for (i = 0; i < clkc_data->regmap_clks_count; i++)
3109 clkc_data->regmap_clks[i]->map = map;
3110
3111 /* Register all clks */
3112 for (i = 0; i < clkc_data->hw_onecell_data->num; i++) {
3113 /* array might be sparse */
3114 if (!clkc_data->hw_onecell_data->hws[i])
3115 continue;
3116
3117 ret = devm_clk_hw_register(dev,
3118 clkc_data->hw_onecell_data->hws[i]);
3119 if (ret) {
3120 dev_err(dev, "Clock registration failed\n");
3121 return ret;
3122 }
3123 }
3124
3125 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
3126 clkc_data->hw_onecell_data);
3127}
3128
3129static struct platform_driver gxbb_driver = { 3265static struct platform_driver gxbb_driver = {
3130 .probe = gxbb_clkc_probe, 3266 .probe = meson_eeclkc_probe,
3131 .driver = { 3267 .driver = {
3132 .name = "gxbb-clkc", 3268 .name = "gxbb-clkc",
3133 .of_match_table = clkc_match_table, 3269 .of_match_table = clkc_match_table,
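The gxbb.c hunk above drops the file-local probe in favour of the shared meson_eeclkc_probe(), with the per-SoC data carried through the OF match table the hunk leaves in place. As a hedged sketch (the table entries themselves are not shown in this diff and are assumed here), the wiring looks roughly like this:

static const struct of_device_id clkc_match_table[] = {
	{ .compatible = "amlogic,gxbb-clkc", .data = &gxbb_clkc_data },
	{ .compatible = "amlogic,gxl-clkc",  .data = &gxl_clkc_data },
	{},
};

/* meson_eeclkc_probe() then recovers the matched entry with
 * of_device_get_match_data(), as shown in meson-eeclk.c below. */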
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index f965845917e3..b67951909e04 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -14,9 +14,11 @@
14#include <linux/reset-controller.h> 14#include <linux/reset-controller.h>
15#include <linux/mfd/syscon.h> 15#include <linux/mfd/syscon.h>
16#include <linux/of_device.h> 16#include <linux/of_device.h>
17#include "clk-regmap.h" 17#include <linux/slab.h>
18#include "meson-aoclk.h" 18#include "meson-aoclk.h"
19 19
20#include "clk-input.h"
21
20static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev, 22static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev,
21 unsigned long id) 23 unsigned long id)
22{ 24{
@@ -31,6 +33,37 @@ static const struct reset_control_ops meson_aoclk_reset_ops = {
31 .reset = meson_aoclk_do_reset, 33 .reset = meson_aoclk_do_reset,
32}; 34};
33 35
36static int meson_aoclkc_register_inputs(struct device *dev,
37 struct meson_aoclk_data *data)
38{
39 struct clk_hw *hw;
40 char *str;
41 int i;
42
43 for (i = 0; i < data->num_inputs; i++) {
44 const struct meson_aoclk_input *in = &data->inputs[i];
45
46 str = kasprintf(GFP_KERNEL, "%s%s", data->input_prefix,
47 in->name);
48 if (!str)
49 return -ENOMEM;
50
51 hw = meson_clk_hw_register_input(dev, in->name, str, 0);
52 kfree(str);
53
54 if (IS_ERR(hw)) {
55 if (!in->required && PTR_ERR(hw) == -ENOENT)
56 continue;
57 else if (PTR_ERR(hw) != -EPROBE_DEFER)
58 dev_err(dev, "failed to register input %s\n",
59 in->name);
60 return PTR_ERR(hw);
61 }
62 }
63
64 return 0;
65}
66
34int meson_aoclkc_probe(struct platform_device *pdev) 67int meson_aoclkc_probe(struct platform_device *pdev)
35{ 68{
36 struct meson_aoclk_reset_controller *rstc; 69 struct meson_aoclk_reset_controller *rstc;
@@ -53,6 +86,10 @@ int meson_aoclkc_probe(struct platform_device *pdev)
53 return PTR_ERR(regmap); 86 return PTR_ERR(regmap);
54 } 87 }
55 88
89 ret = meson_aoclkc_register_inputs(dev, data);
90 if (ret)
91 return ret;
92
56 /* Reset Controller */ 93 /* Reset Controller */
57 rstc->data = data; 94 rstc->data = data;
58 rstc->regmap = regmap; 95 rstc->regmap = regmap;
@@ -65,15 +102,20 @@ int meson_aoclkc_probe(struct platform_device *pdev)
65 return ret; 102 return ret;
66 } 103 }
67 104
68 /* 105 /* Populate regmap */
69 * Populate regmap and register all clks 106 for (clkid = 0; clkid < data->num_clks; clkid++)
70 */
71 for (clkid = 0; clkid < data->num_clks; clkid++) {
72 data->clks[clkid]->map = regmap; 107 data->clks[clkid]->map = regmap;
73 108
109 /* Register all clks */
110 for (clkid = 0; clkid < data->hw_data->num; clkid++) {
111 if (!data->hw_data->hws[clkid])
112 continue;
113
74 ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]); 114 ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
75 if (ret) 115 if (ret) {
116 dev_err(dev, "Clock registration failed\n");
76 return ret; 117 return ret;
118 }
77 } 119 }
78 120
79 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, 121 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index ab2819e88922..999cde3868f7 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -11,16 +11,27 @@
11#ifndef __MESON_AOCLK_H__ 11#ifndef __MESON_AOCLK_H__
12#define __MESON_AOCLK_H__ 12#define __MESON_AOCLK_H__
13 13
14#include <linux/clk-provider.h>
14#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/regmap.h>
15#include <linux/reset-controller.h> 17#include <linux/reset-controller.h>
18
16#include "clk-regmap.h" 19#include "clk-regmap.h"
17 20
21struct meson_aoclk_input {
22 const char *name;
23 bool required;
24};
25
18struct meson_aoclk_data { 26struct meson_aoclk_data {
19 const unsigned int reset_reg; 27 const unsigned int reset_reg;
20 const int num_reset; 28 const int num_reset;
21 const unsigned int *reset; 29 const unsigned int *reset;
22 int num_clks; 30 const int num_clks;
23 struct clk_regmap **clks; 31 struct clk_regmap **clks;
32 const int num_inputs;
33 const struct meson_aoclk_input *inputs;
34 const char *input_prefix;
24 const struct clk_hw_onecell_data *hw_data; 35 const struct clk_hw_onecell_data *hw_data;
25}; 36};
26 37
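meson-aoclk.h now lets a SoC driver describe optional input clocks alongside its regmap clocks. A minimal sketch of such a description (the SoC name, prefix and clock names below are hypothetical, not taken from this patch):

static const struct meson_aoclk_input sample_aoclk_inputs[] = {
	{ .name = "xtal",  .required = true },
	{ .name = "mpll2", .required = false },
};

static const struct meson_aoclk_data sample_aoclkc_data = {
	/* reset and clock fields omitted for brevity */
	.num_inputs   = ARRAY_SIZE(sample_aoclk_inputs),
	.inputs       = sample_aoclk_inputs,
	.input_prefix = "ao-in-",
};

meson_aoclkc_register_inputs() registers each entry as "<input_prefix><name>", skipping optional inputs that are absent (-ENOENT) while still failing the probe on a missing required input.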
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
new file mode 100644
index 000000000000..37a34c9c3885
--- /dev/null
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -0,0 +1,63 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#include <linux/clk-provider.h>
8#include <linux/of_device.h>
9#include <linux/platform_device.h>
10#include <linux/mfd/syscon.h>
11#include <linux/regmap.h>
12
13#include "clk-input.h"
14#include "clk-regmap.h"
15#include "meson-eeclk.h"
16
17int meson_eeclkc_probe(struct platform_device *pdev)
18{
19 const struct meson_eeclkc_data *data;
20 struct device *dev = &pdev->dev;
21 struct clk_hw *input;
22 struct regmap *map;
23 int ret, i;
24
25 data = of_device_get_match_data(dev);
26 if (!data)
27 return -EINVAL;
28
29 /* Get the hhi system controller node */
30 map = syscon_node_to_regmap(of_get_parent(dev->of_node));
31 if (IS_ERR(map)) {
32 dev_err(dev,
33 "failed to get HHI regmap\n");
34 return PTR_ERR(map);
35 }
36
37 input = meson_clk_hw_register_input(dev, "xtal", IN_PREFIX "xtal", 0);
38 if (IS_ERR(input)) {
39 ret = PTR_ERR(input);
40 if (ret != -EPROBE_DEFER)
41 dev_err(dev, "failed to get input clock");
42 return ret;
43 }
44
45 /* Populate regmap for the regmap backed clocks */
46 for (i = 0; i < data->regmap_clk_num; i++)
47 data->regmap_clks[i]->map = map;
48
49 for (i = 0; i < data->hw_onecell_data->num; i++) {
50 /* array might be sparse */
51 if (!data->hw_onecell_data->hws[i])
52 continue;
53
54 ret = devm_clk_hw_register(dev, data->hw_onecell_data->hws[i]);
55 if (ret) {
56 dev_err(dev, "Clock registration failed\n");
57 return ret;
58 }
59 }
60
61 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
62 data->hw_onecell_data);
63}
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
new file mode 100644
index 000000000000..1b809b1419fe
--- /dev/null
+++ b/drivers/clk/meson/meson-eeclk.h
@@ -0,0 +1,25 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_CLKC_H
8#define __MESON_CLKC_H
9
10#include <linux/clk-provider.h>
11#include "clk-regmap.h"
12
13#define IN_PREFIX "ee-in-"
14
15struct platform_device;
16
17struct meson_eeclkc_data {
18 struct clk_regmap *const *regmap_clks;
19 unsigned int regmap_clk_num;
20 struct clk_hw_onecell_data *hw_onecell_data;
21};
22
23int meson_eeclkc_probe(struct platform_device *pdev);
24
25#endif /* __MESON_CLKC_H */
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 950d0e548c75..576ad42252d0 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -16,9 +16,10 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/regmap.h> 17#include <linux/regmap.h>
18 18
19#include "clkc.h"
20#include "meson8b.h" 19#include "meson8b.h"
21#include "clk-regmap.h" 20#include "clk-regmap.h"
21#include "clk-pll.h"
22#include "clk-mpll.h"
22 23
23static DEFINE_SPINLOCK(meson_clk_lock); 24static DEFINE_SPINLOCK(meson_clk_lock);
24 25
@@ -803,16 +804,16 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
803 }, 804 },
804}; 805};
805 806
806static u32 mux_table_abp[] = { 1, 2, 3, 4, 5, 6, 7 }; 807static u32 mux_table_apb[] = { 1, 2, 3, 4, 5, 6, 7 };
807static struct clk_regmap meson8b_abp_clk_sel = { 808static struct clk_regmap meson8b_apb_clk_sel = {
808 .data = &(struct clk_regmap_mux_data){ 809 .data = &(struct clk_regmap_mux_data){
809 .offset = HHI_SYS_CPU_CLK_CNTL1, 810 .offset = HHI_SYS_CPU_CLK_CNTL1,
810 .mask = 0x7, 811 .mask = 0x7,
811 .shift = 3, 812 .shift = 3,
812 .table = mux_table_abp, 813 .table = mux_table_apb,
813 }, 814 },
814 .hw.init = &(struct clk_init_data){ 815 .hw.init = &(struct clk_init_data){
815 .name = "abp_clk_sel", 816 .name = "apb_clk_sel",
816 .ops = &clk_regmap_mux_ops, 817 .ops = &clk_regmap_mux_ops,
817 .parent_names = (const char *[]){ "cpu_clk_div2", 818 .parent_names = (const char *[]){ "cpu_clk_div2",
818 "cpu_clk_div3", 819 "cpu_clk_div3",
@@ -825,16 +826,16 @@ static struct clk_regmap meson8b_abp_clk_sel = {
825 }, 826 },
826}; 827};
827 828
828static struct clk_regmap meson8b_abp_clk_gate = { 829static struct clk_regmap meson8b_apb_clk_gate = {
829 .data = &(struct clk_regmap_gate_data){ 830 .data = &(struct clk_regmap_gate_data){
830 .offset = HHI_SYS_CPU_CLK_CNTL1, 831 .offset = HHI_SYS_CPU_CLK_CNTL1,
831 .bit_idx = 16, 832 .bit_idx = 16,
832 .flags = CLK_GATE_SET_TO_DISABLE, 833 .flags = CLK_GATE_SET_TO_DISABLE,
833 }, 834 },
834 .hw.init = &(struct clk_init_data){ 835 .hw.init = &(struct clk_init_data){
835 .name = "abp_clk_dis", 836 .name = "apb_clk_dis",
836 .ops = &clk_regmap_gate_ro_ops, 837 .ops = &clk_regmap_gate_ro_ops,
837 .parent_names = (const char *[]){ "abp_clk_sel" }, 838 .parent_names = (const char *[]){ "apb_clk_sel" },
838 .num_parents = 1, 839 .num_parents = 1,
839 .flags = CLK_SET_RATE_PARENT, 840 .flags = CLK_SET_RATE_PARENT,
840 }, 841 },
@@ -1573,6 +1574,135 @@ static struct clk_regmap meson8b_hdmi_sys = {
1573 }, 1574 },
1574}; 1575};
1575 1576
1577/*
1578 * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
1579 * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
1580 * has mali_0 and no glitch-free mux.
1581 */
1582static const char * const meson8b_mali_0_1_parent_names[] = {
1583 "xtal", "mpll2", "mpll1", "fclk_div7", "fclk_div4", "fclk_div3",
1584 "fclk_div5"
1585};
1586
1587static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
1588
1589static struct clk_regmap meson8b_mali_0_sel = {
1590 .data = &(struct clk_regmap_mux_data){
1591 .offset = HHI_MALI_CLK_CNTL,
1592 .mask = 0x7,
1593 .shift = 9,
1594 .table = meson8b_mali_0_1_mux_table,
1595 },
1596 .hw.init = &(struct clk_init_data){
1597 .name = "mali_0_sel",
1598 .ops = &clk_regmap_mux_ops,
1599 .parent_names = meson8b_mali_0_1_parent_names,
1600 .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
1601 /*
1602 * Don't propagate rate changes up because the only changeable
1603 * parents are mpll1 and mpll2 but we need those for audio and
1604 * RGMII (Ethernet). We don't want to change the audio or
1605 * Ethernet clocks when setting the GPU frequency.
1606 */
1607 .flags = 0,
1608 },
1609};
1610
1611static struct clk_regmap meson8b_mali_0_div = {
1612 .data = &(struct clk_regmap_div_data){
1613 .offset = HHI_MALI_CLK_CNTL,
1614 .shift = 0,
1615 .width = 7,
1616 },
1617 .hw.init = &(struct clk_init_data){
1618 .name = "mali_0_div",
1619 .ops = &clk_regmap_divider_ops,
1620 .parent_names = (const char *[]){ "mali_0_sel" },
1621 .num_parents = 1,
1622 .flags = CLK_SET_RATE_PARENT,
1623 },
1624};
1625
1626static struct clk_regmap meson8b_mali_0 = {
1627 .data = &(struct clk_regmap_gate_data){
1628 .offset = HHI_MALI_CLK_CNTL,
1629 .bit_idx = 8,
1630 },
1631 .hw.init = &(struct clk_init_data){
1632 .name = "mali_0",
1633 .ops = &clk_regmap_gate_ops,
1634 .parent_names = (const char *[]){ "mali_0_div" },
1635 .num_parents = 1,
1636 .flags = CLK_SET_RATE_PARENT,
1637 },
1638};
1639
1640static struct clk_regmap meson8b_mali_1_sel = {
1641 .data = &(struct clk_regmap_mux_data){
1642 .offset = HHI_MALI_CLK_CNTL,
1643 .mask = 0x7,
1644 .shift = 25,
1645 .table = meson8b_mali_0_1_mux_table,
1646 },
1647 .hw.init = &(struct clk_init_data){
1648 .name = "mali_1_sel",
1649 .ops = &clk_regmap_mux_ops,
1650 .parent_names = meson8b_mali_0_1_parent_names,
1651 .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
1652 /*
1653 * Don't propagate rate changes up because the only changeable
1654 * parents are mpll1 and mpll2 but we need those for audio and
1655 * RGMII (Ethernet). We don't want to change the audio or
1656 * Ethernet clocks when setting the GPU frequency.
1657 */
1658 .flags = 0,
1659 },
1660};
1661
1662static struct clk_regmap meson8b_mali_1_div = {
1663 .data = &(struct clk_regmap_div_data){
1664 .offset = HHI_MALI_CLK_CNTL,
1665 .shift = 16,
1666 .width = 7,
1667 },
1668 .hw.init = &(struct clk_init_data){
1669 .name = "mali_1_div",
1670 .ops = &clk_regmap_divider_ops,
1671 .parent_names = (const char *[]){ "mali_1_sel" },
1672 .num_parents = 1,
1673 .flags = CLK_SET_RATE_PARENT,
1674 },
1675};
1676
1677static struct clk_regmap meson8b_mali_1 = {
1678 .data = &(struct clk_regmap_gate_data){
1679 .offset = HHI_MALI_CLK_CNTL,
1680 .bit_idx = 24,
1681 },
1682 .hw.init = &(struct clk_init_data){
1683 .name = "mali_1",
1684 .ops = &clk_regmap_gate_ops,
1685 .parent_names = (const char *[]){ "mali_1_div" },
1686 .num_parents = 1,
1687 .flags = CLK_SET_RATE_PARENT,
1688 },
1689};
1690
1691static struct clk_regmap meson8b_mali = {
1692 .data = &(struct clk_regmap_mux_data){
1693 .offset = HHI_MALI_CLK_CNTL,
1694 .mask = 1,
1695 .shift = 31,
1696 },
1697 .hw.init = &(struct clk_init_data){
1698 .name = "mali",
1699 .ops = &clk_regmap_mux_ops,
1700 .parent_names = (const char *[]){ "mali_0", "mali_1" },
1701 .num_parents = 2,
1702 .flags = CLK_SET_RATE_PARENT,
1703 },
1704};
1705
1576/* Everything Else (EE) domain gates */ 1706/* Everything Else (EE) domain gates */
1577 1707
1578static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0); 1708static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
@@ -1659,6 +1789,188 @@ static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1);
1659static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2); 1789static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2);
1660static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3); 1790static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
1661 1791
1792static struct clk_hw_onecell_data meson8_hw_onecell_data = {
1793 .hws = {
1794 [CLKID_XTAL] = &meson8b_xtal.hw,
1795 [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
1796 [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
1797 [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
1798 [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
1799 [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
1800 [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
1801 [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
1802 [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
1803 [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
1804 [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
1805 [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
1806 [CLKID_CLK81] = &meson8b_clk81.hw,
1807 [CLKID_DDR] = &meson8b_ddr.hw,
1808 [CLKID_DOS] = &meson8b_dos.hw,
1809 [CLKID_ISA] = &meson8b_isa.hw,
1810 [CLKID_PL301] = &meson8b_pl301.hw,
1811 [CLKID_PERIPHS] = &meson8b_periphs.hw,
1812 [CLKID_SPICC] = &meson8b_spicc.hw,
1813 [CLKID_I2C] = &meson8b_i2c.hw,
1814 [CLKID_SAR_ADC] = &meson8b_sar_adc.hw,
1815 [CLKID_SMART_CARD] = &meson8b_smart_card.hw,
1816 [CLKID_RNG0] = &meson8b_rng0.hw,
1817 [CLKID_UART0] = &meson8b_uart0.hw,
1818 [CLKID_SDHC] = &meson8b_sdhc.hw,
1819 [CLKID_STREAM] = &meson8b_stream.hw,
1820 [CLKID_ASYNC_FIFO] = &meson8b_async_fifo.hw,
1821 [CLKID_SDIO] = &meson8b_sdio.hw,
1822 [CLKID_ABUF] = &meson8b_abuf.hw,
1823 [CLKID_HIU_IFACE] = &meson8b_hiu_iface.hw,
1824 [CLKID_ASSIST_MISC] = &meson8b_assist_misc.hw,
1825 [CLKID_SPI] = &meson8b_spi.hw,
1826 [CLKID_I2S_SPDIF] = &meson8b_i2s_spdif.hw,
1827 [CLKID_ETH] = &meson8b_eth.hw,
1828 [CLKID_DEMUX] = &meson8b_demux.hw,
1829 [CLKID_AIU_GLUE] = &meson8b_aiu_glue.hw,
1830 [CLKID_IEC958] = &meson8b_iec958.hw,
1831 [CLKID_I2S_OUT] = &meson8b_i2s_out.hw,
1832 [CLKID_AMCLK] = &meson8b_amclk.hw,
1833 [CLKID_AIFIFO2] = &meson8b_aififo2.hw,
1834 [CLKID_MIXER] = &meson8b_mixer.hw,
1835 [CLKID_MIXER_IFACE] = &meson8b_mixer_iface.hw,
1836 [CLKID_ADC] = &meson8b_adc.hw,
1837 [CLKID_BLKMV] = &meson8b_blkmv.hw,
1838 [CLKID_AIU] = &meson8b_aiu.hw,
1839 [CLKID_UART1] = &meson8b_uart1.hw,
1840 [CLKID_G2D] = &meson8b_g2d.hw,
1841 [CLKID_USB0] = &meson8b_usb0.hw,
1842 [CLKID_USB1] = &meson8b_usb1.hw,
1843 [CLKID_RESET] = &meson8b_reset.hw,
1844 [CLKID_NAND] = &meson8b_nand.hw,
1845 [CLKID_DOS_PARSER] = &meson8b_dos_parser.hw,
1846 [CLKID_USB] = &meson8b_usb.hw,
1847 [CLKID_VDIN1] = &meson8b_vdin1.hw,
1848 [CLKID_AHB_ARB0] = &meson8b_ahb_arb0.hw,
1849 [CLKID_EFUSE] = &meson8b_efuse.hw,
1850 [CLKID_BOOT_ROM] = &meson8b_boot_rom.hw,
1851 [CLKID_AHB_DATA_BUS] = &meson8b_ahb_data_bus.hw,
1852 [CLKID_AHB_CTRL_BUS] = &meson8b_ahb_ctrl_bus.hw,
1853 [CLKID_HDMI_INTR_SYNC] = &meson8b_hdmi_intr_sync.hw,
1854 [CLKID_HDMI_PCLK] = &meson8b_hdmi_pclk.hw,
1855 [CLKID_USB1_DDR_BRIDGE] = &meson8b_usb1_ddr_bridge.hw,
1856 [CLKID_USB0_DDR_BRIDGE] = &meson8b_usb0_ddr_bridge.hw,
1857 [CLKID_MMC_PCLK] = &meson8b_mmc_pclk.hw,
1858 [CLKID_DVIN] = &meson8b_dvin.hw,
1859 [CLKID_UART2] = &meson8b_uart2.hw,
1860 [CLKID_SANA] = &meson8b_sana.hw,
1861 [CLKID_VPU_INTR] = &meson8b_vpu_intr.hw,
1862 [CLKID_SEC_AHB_AHB3_BRIDGE] = &meson8b_sec_ahb_ahb3_bridge.hw,
1863 [CLKID_CLK81_A9] = &meson8b_clk81_a9.hw,
1864 [CLKID_VCLK2_VENCI0] = &meson8b_vclk2_venci0.hw,
1865 [CLKID_VCLK2_VENCI1] = &meson8b_vclk2_venci1.hw,
1866 [CLKID_VCLK2_VENCP0] = &meson8b_vclk2_vencp0.hw,
1867 [CLKID_VCLK2_VENCP1] = &meson8b_vclk2_vencp1.hw,
1868 [CLKID_GCLK_VENCI_INT] = &meson8b_gclk_venci_int.hw,
1869 [CLKID_GCLK_VENCP_INT] = &meson8b_gclk_vencp_int.hw,
1870 [CLKID_DAC_CLK] = &meson8b_dac_clk.hw,
1871 [CLKID_AOCLK_GATE] = &meson8b_aoclk_gate.hw,
1872 [CLKID_IEC958_GATE] = &meson8b_iec958_gate.hw,
1873 [CLKID_ENC480P] = &meson8b_enc480p.hw,
1874 [CLKID_RNG1] = &meson8b_rng1.hw,
1875 [CLKID_GCLK_VENCL_INT] = &meson8b_gclk_vencl_int.hw,
1876 [CLKID_VCLK2_VENCLMCC] = &meson8b_vclk2_venclmcc.hw,
1877 [CLKID_VCLK2_VENCL] = &meson8b_vclk2_vencl.hw,
1878 [CLKID_VCLK2_OTHER] = &meson8b_vclk2_other.hw,
1879 [CLKID_EDP] = &meson8b_edp.hw,
1880 [CLKID_AO_MEDIA_CPU] = &meson8b_ao_media_cpu.hw,
1881 [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw,
1882 [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw,
1883 [CLKID_AO_IFACE] = &meson8b_ao_iface.hw,
1884 [CLKID_MPLL0] = &meson8b_mpll0.hw,
1885 [CLKID_MPLL1] = &meson8b_mpll1.hw,
1886 [CLKID_MPLL2] = &meson8b_mpll2.hw,
1887 [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw,
1888 [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw,
1889 [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw,
1890 [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw,
1891 [CLKID_CPU_IN_DIV2] = &meson8b_cpu_in_div2.hw,
1892 [CLKID_CPU_IN_DIV3] = &meson8b_cpu_in_div3.hw,
1893 [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw,
1894 [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw,
1895 [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw,
1896 [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw,
1897 [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw,
1898 [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw,
1899 [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw,
1900 [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
1901 [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
1902 [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
1903 [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
1904 [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
1905 [CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
1906 [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
1907 [CLKID_CPU_CLK_DIV2] = &meson8b_cpu_clk_div2.hw,
1908 [CLKID_CPU_CLK_DIV3] = &meson8b_cpu_clk_div3.hw,
1909 [CLKID_CPU_CLK_DIV4] = &meson8b_cpu_clk_div4.hw,
1910 [CLKID_CPU_CLK_DIV5] = &meson8b_cpu_clk_div5.hw,
1911 [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
1912 [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
1913 [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
1914 [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
1915 [CLKID_APB] = &meson8b_apb_clk_gate.hw,
1916 [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
1917 [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
1918 [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
1919 [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
1920 [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
1921 [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
1922 [CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
1923 [CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
1924 [CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
1925 [CLKID_VID_PLL_IN_EN] = &meson8b_vid_pll_in_en.hw,
1926 [CLKID_VID_PLL_PRE_DIV] = &meson8b_vid_pll_pre_div.hw,
1927 [CLKID_VID_PLL_POST_DIV] = &meson8b_vid_pll_post_div.hw,
1928 [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw,
1929 [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
1930 [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
1931 [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
1932 [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
1933 [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
1934 [CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
1935 [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
1936 [CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
1937 [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
1938 [CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
1939 [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
1940 [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
1941 [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
1942 [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
1943 [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
1944 [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
1945 [CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
1946 [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
1947 [CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
1948 [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
1949 [CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
1950 [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
1951 [CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
1952 [CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
1953 [CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
1954 [CLKID_CTS_ENCP] = &meson8b_cts_encp.hw,
1955 [CLKID_CTS_ENCI_SEL] = &meson8b_cts_enci_sel.hw,
1956 [CLKID_CTS_ENCI] = &meson8b_cts_enci.hw,
1957 [CLKID_HDMI_TX_PIXEL_SEL] = &meson8b_hdmi_tx_pixel_sel.hw,
1958 [CLKID_HDMI_TX_PIXEL] = &meson8b_hdmi_tx_pixel.hw,
1959 [CLKID_CTS_ENCL_SEL] = &meson8b_cts_encl_sel.hw,
1960 [CLKID_CTS_ENCL] = &meson8b_cts_encl.hw,
1961 [CLKID_CTS_VDAC0_SEL] = &meson8b_cts_vdac0_sel.hw,
1962 [CLKID_CTS_VDAC0] = &meson8b_cts_vdac0.hw,
1963 [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
1964 [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
1965 [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
1966 [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
1967 [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
1968 [CLKID_MALI] = &meson8b_mali_0.hw,
1969 [CLK_NR_CLKS] = NULL,
1970 },
1971 .num = CLK_NR_CLKS,
1972};
1973
1662static struct clk_hw_onecell_data meson8b_hw_onecell_data = { 1974static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
1663 .hws = { 1975 .hws = {
1664 [CLKID_XTAL] = &meson8b_xtal.hw, 1976 [CLKID_XTAL] = &meson8b_xtal.hw,
@@ -1781,8 +2093,8 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
1781 [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw, 2093 [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
1782 [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw, 2094 [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
1783 [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw, 2095 [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
1784 [CLKID_ABP_SEL] = &meson8b_abp_clk_sel.hw, 2096 [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
1785 [CLKID_ABP] = &meson8b_abp_clk_gate.hw, 2097 [CLKID_APB] = &meson8b_apb_clk_gate.hw,
1786 [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw, 2098 [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
1787 [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw, 2099 [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
1788 [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw, 2100 [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
@@ -1833,6 +2145,13 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
1833 [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw, 2145 [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
1834 [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw, 2146 [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
1835 [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw, 2147 [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
2148 [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
2149 [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
2150 [CLKID_MALI_0] = &meson8b_mali_0.hw,
2151 [CLKID_MALI_1_SEL] = &meson8b_mali_1_sel.hw,
2152 [CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw,
2153 [CLKID_MALI_1] = &meson8b_mali_1.hw,
2154 [CLKID_MALI] = &meson8b_mali.hw,
1836 [CLK_NR_CLKS] = NULL, 2155 [CLK_NR_CLKS] = NULL,
1837 }, 2156 },
1838 .num = CLK_NR_CLKS, 2157 .num = CLK_NR_CLKS,
@@ -1943,8 +2262,8 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
1943 &meson8b_fixed_pll_dco, 2262 &meson8b_fixed_pll_dco,
1944 &meson8b_hdmi_pll_dco, 2263 &meson8b_hdmi_pll_dco,
1945 &meson8b_sys_pll_dco, 2264 &meson8b_sys_pll_dco,
1946 &meson8b_abp_clk_sel, 2265 &meson8b_apb_clk_sel,
1947 &meson8b_abp_clk_gate, 2266 &meson8b_apb_clk_gate,
1948 &meson8b_periph_clk_sel, 2267 &meson8b_periph_clk_sel,
1949 &meson8b_periph_clk_gate, 2268 &meson8b_periph_clk_gate,
1950 &meson8b_axi_clk_sel, 2269 &meson8b_axi_clk_sel,
@@ -1988,6 +2307,13 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
1988 &meson8b_hdmi_sys_sel, 2307 &meson8b_hdmi_sys_sel,
1989 &meson8b_hdmi_sys_div, 2308 &meson8b_hdmi_sys_div,
1990 &meson8b_hdmi_sys, 2309 &meson8b_hdmi_sys,
2310 &meson8b_mali_0_sel,
2311 &meson8b_mali_0_div,
2312 &meson8b_mali_0,
2313 &meson8b_mali_1_sel,
2314 &meson8b_mali_1_div,
2315 &meson8b_mali_1,
2316 &meson8b_mali,
1991}; 2317};
1992 2318
1993static const struct meson8b_clk_reset_line { 2319static const struct meson8b_clk_reset_line {
@@ -2132,7 +2458,6 @@ static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
2132 2458
2133static struct meson8b_nb_data meson8b_cpu_nb_data = { 2459static struct meson8b_nb_data meson8b_cpu_nb_data = {
2134 .nb.notifier_call = meson8b_cpu_clk_notifier_cb, 2460 .nb.notifier_call = meson8b_cpu_clk_notifier_cb,
2135 .onecell_data = &meson8b_hw_onecell_data,
2136}; 2461};
2137 2462
2138static const struct regmap_config clkc_regmap_config = { 2463static const struct regmap_config clkc_regmap_config = {
@@ -2141,7 +2466,8 @@ static const struct regmap_config clkc_regmap_config = {
2141 .reg_stride = 4, 2466 .reg_stride = 4,
2142}; 2467};
2143 2468
2144static void __init meson8b_clkc_init(struct device_node *np) 2469static void __init meson8b_clkc_init_common(struct device_node *np,
2470 struct clk_hw_onecell_data *clk_hw_onecell_data)
2145{ 2471{
2146 struct meson8b_clk_reset *rstc; 2472 struct meson8b_clk_reset *rstc;
2147 const char *notifier_clk_name; 2473 const char *notifier_clk_name;
@@ -2192,14 +2518,16 @@ static void __init meson8b_clkc_init(struct device_node *np)
2192 */ 2518 */
2193 for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) { 2519 for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
2194 /* array might be sparse */ 2520 /* array might be sparse */
2195 if (!meson8b_hw_onecell_data.hws[i]) 2521 if (!clk_hw_onecell_data->hws[i])
2196 continue; 2522 continue;
2197 2523
2198 ret = clk_hw_register(NULL, meson8b_hw_onecell_data.hws[i]); 2524 ret = clk_hw_register(NULL, clk_hw_onecell_data->hws[i]);
2199 if (ret) 2525 if (ret)
2200 return; 2526 return;
2201 } 2527 }
2202 2528
2529 meson8b_cpu_nb_data.onecell_data = clk_hw_onecell_data;
2530
2203 /* 2531 /*
2204 * FIXME we shouldn't program the muxes in notifier handlers. The 2532 * FIXME we shouldn't program the muxes in notifier handlers. The
2205 * tricky programming sequence will be handled by the forthcoming 2533 * tricky programming sequence will be handled by the forthcoming
@@ -2215,13 +2543,23 @@ static void __init meson8b_clkc_init(struct device_node *np)
2215 } 2543 }
2216 2544
2217 ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, 2545 ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
2218 &meson8b_hw_onecell_data); 2546 clk_hw_onecell_data);
2219 if (ret) 2547 if (ret)
2220 pr_err("%s: failed to register clock provider\n", __func__); 2548 pr_err("%s: failed to register clock provider\n", __func__);
2221} 2549}
2222 2550
2551static void __init meson8_clkc_init(struct device_node *np)
2552{
2553 return meson8b_clkc_init_common(np, &meson8_hw_onecell_data);
2554}
2555
2556static void __init meson8b_clkc_init(struct device_node *np)
2557{
2558 return meson8b_clkc_init_common(np, &meson8b_hw_onecell_data);
2559}
2560
2223CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc", 2561CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc",
2224 meson8b_clkc_init); 2562 meson8_clkc_init);
2225CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc", 2563CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc",
2226 meson8b_clkc_init); 2564 meson8b_clkc_init);
2227CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc", 2565CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc",
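The flag choices in the new Mali clocks added to meson8b.c above (CLK_SET_RATE_PARENT everywhere except on mali_0_sel/mali_1_sel) mean a rate request on the glitch-free mux propagates through the gate to the 7-bit divider and stops at the selector, leaving the chosen fclk/mpll parent untouched. A hedged consumer-side illustration (the clock lookup below is an assumption, not part of the patch):

struct clk *gpu_clk = devm_clk_get(&pdev->dev, NULL);	/* hypothetical consumer lookup */

if (!IS_ERR(gpu_clk))
	clk_set_rate(gpu_clk, 255000000);	/* expected to reprogram only the
						 * divider bits in HHI_MALI_CLK_CNTL */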
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 87fba739af81..b8c58faeae52 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -33,6 +33,7 @@
33#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */ 33#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
34#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */ 34#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
35#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */ 35#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
36#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
36#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */ 37#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
37#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */ 38#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
38#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */ 39#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
@@ -91,7 +92,7 @@
91#define CLKID_CPU_CLK_DIV6 120 92#define CLKID_CPU_CLK_DIV6 120
92#define CLKID_CPU_CLK_DIV7 121 93#define CLKID_CPU_CLK_DIV7 121
93#define CLKID_CPU_CLK_DIV8 122 94#define CLKID_CPU_CLK_DIV8 122
94#define CLKID_ABP_SEL 123 95#define CLKID_APB_SEL 123
95#define CLKID_PERIPH_SEL 125 96#define CLKID_PERIPH_SEL 125
96#define CLKID_AXI_SEL 127 97#define CLKID_AXI_SEL 127
97#define CLKID_L2_DRAM_SEL 129 98#define CLKID_L2_DRAM_SEL 129
@@ -139,8 +140,14 @@
139#define CLKID_HDMI_SYS_SEL 172 140#define CLKID_HDMI_SYS_SEL 172
140#define CLKID_HDMI_SYS_DIV 173 141#define CLKID_HDMI_SYS_DIV 173
141#define CLKID_HDMI_SYS 174 142#define CLKID_HDMI_SYS 174
143#define CLKID_MALI_0_SEL 175
144#define CLKID_MALI_0_DIV 176
145#define CLKID_MALI_0 177
146#define CLKID_MALI_1_SEL 178
147#define CLKID_MALI_1_DIV 179
148#define CLKID_MALI_1 180
142 149
143#define CLK_NR_CLKS 175 150#define CLK_NR_CLKS 181
144 151
145/* 152/*
146 * include the CLKID and RESETID that have 153 * include the CLKID and RESETID that have
diff --git a/drivers/clk/meson/parm.h b/drivers/clk/meson/parm.h
new file mode 100644
index 000000000000..3c9ef1b505ce
--- /dev/null
+++ b/drivers/clk/meson/parm.h
@@ -0,0 +1,46 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2015 Endless Mobile, Inc.
4 * Author: Carlo Caione <carlo@endlessm.com>
5 */
6
7#ifndef __MESON_PARM_H
8#define __MESON_PARM_H
9
10#include <linux/bits.h>
11#include <linux/regmap.h>
12
13#define PMASK(width) GENMASK(width - 1, 0)
14#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
15#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
16
17#define PARM_GET(width, shift, reg) \
18 (((reg) & SETPMASK(width, shift)) >> (shift))
19#define PARM_SET(width, shift, reg, val) \
20 (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))
21
22#define MESON_PARM_APPLICABLE(p) (!!((p)->width))
23
24struct parm {
25 u16 reg_off;
26 u8 shift;
27 u8 width;
28};
29
30static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
31{
32 unsigned int val;
33
34 regmap_read(map, p->reg_off, &val);
35 return PARM_GET(p->width, p->shift, val);
36}
37
38static inline void meson_parm_write(struct regmap *map, struct parm *p,
39 unsigned int val)
40{
41 regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
42 val << p->shift);
43}
44
45#endif /* __MESON_PARM_H */
46
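The new parm.h carries the register-field helpers formerly provided by clkc.h. A standalone worked example of the bit arithmetic (PMASK/SETPMASK re-spelled with plain shifts instead of GENMASK so it compiles outside the kernel; values are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define PMASK(width)		((1u << (width)) - 1)		/* GENMASK(width - 1, 0) */
#define SETPMASK(width, shift)	(PMASK(width) << (shift))	/* GENMASK(shift + width - 1, shift) */
#define CLRPMASK(width, shift)	(~SETPMASK(width, shift))
#define PARM_GET(width, shift, reg) \
	(((reg) & SETPMASK(width, shift)) >> (shift))
#define PARM_SET(width, shift, reg, val) \
	(((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))

int main(void)
{
	uint32_t reg = 0x00000a00;	/* 3-bit field at shift 9 holds 0b101 = 5 */

	printf("%u\n", PARM_GET(3, 9, reg));		/* prints 5 */
	printf("0x%08x\n", PARM_SET(3, 9, reg, 2));	/* prints 0x00000400 */
	return 0;
}

meson_parm_read() and meson_parm_write() apply exactly this arithmetic, only through regmap_read()/regmap_update_bits() rather than on a plain value.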
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index bc64019b8eeb..3acf03780221 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -16,7 +16,11 @@
16 * duty_cycle = (1 + hi) / (1 + val) 16 * duty_cycle = (1 + hi) / (1 + val)
17 */ 17 */
18 18
19#include "clkc-audio.h" 19#include <linux/clk-provider.h>
20#include <linux/module.h>
21
22#include "clk-regmap.h"
23#include "sclk-div.h"
20 24
21static inline struct meson_sclk_div_data * 25static inline struct meson_sclk_div_data *
22meson_sclk_div_data(struct clk_regmap *clk) 26meson_sclk_div_data(struct clk_regmap *clk)
@@ -241,3 +245,7 @@ const struct clk_ops meson_sclk_div_ops = {
241 .init = sclk_div_init, 245 .init = sclk_div_init,
242}; 246};
243EXPORT_SYMBOL_GPL(meson_sclk_div_ops); 247EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
248
249MODULE_DESCRIPTION("Amlogic Sample divider driver");
250MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
251MODULE_LICENSE("GPL v2");
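The header comment kept at the top of sclk-div.c gives duty_cycle = (1 + hi) / (1 + val). A quick numeric check, with values picked arbitrarily for illustration:

unsigned int val = 9, hi = 4;
unsigned int duty_pct = (100 * (1 + hi)) / (1 + val);	/* (1 + 4) / (1 + 9) = 50% */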
diff --git a/drivers/clk/meson/clkc-audio.h b/drivers/clk/meson/sclk-div.h
index 0a7c157ebf81..b64b2a32005f 100644
--- a/drivers/clk/meson/clkc-audio.h
+++ b/drivers/clk/meson/sclk-div.h
@@ -4,16 +4,11 @@
4 * Author: Jerome Brunet <jbrunet@baylibre.com> 4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */ 5 */
6 6
7#ifndef __MESON_CLKC_AUDIO_H 7#ifndef __MESON_SCLK_DIV_H
8#define __MESON_CLKC_AUDIO_H 8#define __MESON_SCLK_DIV_H
9 9
10#include "clkc.h" 10#include <linux/clk-provider.h>
11 11#include "parm.h"
12struct meson_clk_triphase_data {
13 struct parm ph0;
14 struct parm ph1;
15 struct parm ph2;
16};
17 12
18struct meson_sclk_div_data { 13struct meson_sclk_div_data {
19 struct parm div; 14 struct parm div;
@@ -22,7 +17,6 @@ struct meson_sclk_div_data {
22 struct clk_duty cached_duty; 17 struct clk_duty cached_duty;
23}; 18};
24 19
25extern const struct clk_ops meson_clk_triphase_ops;
26extern const struct clk_ops meson_sclk_div_ops; 20extern const struct clk_ops meson_sclk_div_ops;
27 21
28#endif /* __MESON_CLKC_AUDIO_H */ 22#endif /* __MESON_SCLK_DIV_H */
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index 88af0e282ea0..08bcc01c0923 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -5,7 +5,10 @@
5 */ 5 */
6 6
7#include <linux/clk-provider.h> 7#include <linux/clk-provider.h>
8#include "clkc.h" 8#include <linux/module.h>
9
10#include "clk-regmap.h"
11#include "vid-pll-div.h"
9 12
10static inline struct meson_vid_pll_div_data * 13static inline struct meson_vid_pll_div_data *
11meson_vid_pll_div_data(struct clk_regmap *clk) 14meson_vid_pll_div_data(struct clk_regmap *clk)
@@ -89,3 +92,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
89const struct clk_ops meson_vid_pll_div_ro_ops = { 92const struct clk_ops meson_vid_pll_div_ro_ops = {
90 .recalc_rate = meson_vid_pll_div_recalc_rate, 93 .recalc_rate = meson_vid_pll_div_recalc_rate,
91}; 94};
95EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops);
96
97MODULE_DESCRIPTION("Amlogic video pll divider driver");
98MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
99MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/vid-pll-div.h b/drivers/clk/meson/vid-pll-div.h
new file mode 100644
index 000000000000..c0128e33ccf9
--- /dev/null
+++ b/drivers/clk/meson/vid-pll-div.h
@@ -0,0 +1,20 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019 BayLibre, SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __MESON_VID_PLL_DIV_H
8#define __MESON_VID_PLL_DIV_H
9
10#include <linux/clk-provider.h>
11#include "parm.h"
12
13struct meson_vid_pll_div_data {
14 struct parm val;
15 struct parm sel;
16};
17
18extern const struct clk_ops meson_vid_pll_div_ro_ops;
19
20#endif /* __MESON_VID_PLL_DIV_H */
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index d083b860f083..a60a1be937ad 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -229,9 +229,10 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
229 {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, 229 {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
230 {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, 230 {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
231 {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, 231 {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
232 {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock}, 232 {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock},
233 {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock},
233 {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, 234 {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
234 {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock}, 235 {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x09, 0x09, 0x0, 0, &disp1_lock},
235 {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock}, 236 {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
236 {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock}, 237 {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
237 {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock}, 238 {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
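The MMP2 DISP0 change above splits the bits previously claimed by disp0_clk (0x1b) between disp0_clk (now 0x09) and the new disp0_lcdc_clk (0x12). A standalone check of the bit arithmetic (illustration only):

#include <assert.h>

int main(void)
{
	unsigned int old_mask   = 0x1b;	/* bits 0, 1, 3, 4 */
	unsigned int disp0      = 0x09;	/* bits 0, 3 */
	unsigned int disp0_lcdc = 0x12;	/* bits 1, 4 */

	assert((disp0 | disp0_lcdc) == old_mask);	/* together they cover the old mask */
	assert((disp0 & disp0_lcdc) == 0);		/* with no overlap */
	return 0;
}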
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 7dedfaa6e152..5c6bbee396b3 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -175,8 +175,10 @@ static void __init a370_clk_init(struct device_node *np)
175 175
176 mvebu_coreclk_setup(np, &a370_coreclks); 176 mvebu_coreclk_setup(np, &a370_coreclks);
177 177
178 if (cgnp) 178 if (cgnp) {
179 mvebu_clk_gating_setup(cgnp, a370_gating_desc); 179 mvebu_clk_gating_setup(cgnp, a370_gating_desc);
180 of_node_put(cgnp);
181 }
180} 182}
181CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init); 183CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
182 184
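The of_node_put() calls added in this and the following mvebu hunks release the reference taken when the gating (or divider) node was looked up earlier in each init function, outside the shown context. A minimal sketch of the pattern (the lookup call and compatible string here are assumptions, not taken from the patch):

struct device_node *cgnp =
	of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");

if (cgnp) {
	mvebu_clk_gating_setup(cgnp, a370_gating_desc);
	of_node_put(cgnp);	/* drop the reference taken by the lookup above */
}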
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index e8f03293ec83..fa1568279c23 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -226,7 +226,9 @@ static void __init axp_clk_init(struct device_node *np)
226 226
227 mvebu_coreclk_setup(np, &axp_coreclks); 227 mvebu_coreclk_setup(np, &axp_coreclks);
228 228
229 if (cgnp) 229 if (cgnp) {
230 mvebu_clk_gating_setup(cgnp, axp_gating_desc); 230 mvebu_clk_gating_setup(cgnp, axp_gating_desc);
231 of_node_put(cgnp);
232 }
231} 233}
232CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init); 234CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index e0dd99f36bf4..0bd09d33f9cf 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -188,10 +188,14 @@ static void __init dove_clk_init(struct device_node *np)
188 188
189 mvebu_coreclk_setup(np, &dove_coreclks); 189 mvebu_coreclk_setup(np, &dove_coreclks);
190 190
191 if (ddnp) 191 if (ddnp) {
192 dove_divider_clk_init(ddnp); 192 dove_divider_clk_init(ddnp);
193 of_node_put(ddnp);
194 }
193 195
194 if (cgnp) 196 if (cgnp) {
195 mvebu_clk_gating_setup(cgnp, dove_gating_desc); 197 mvebu_clk_gating_setup(cgnp, dove_gating_desc);
198 of_node_put(cgnp);
199 }
196} 200}
197CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init); 201CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 6f784167bda4..35af3aa18f1c 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -331,6 +331,8 @@ static void __init kirkwood_clk_init(struct device_node *np)
331 if (cgnp) { 331 if (cgnp) {
332 mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc); 332 mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
333 kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc); 333 kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc);
334
335 of_node_put(cgnp);
334 } 336 }
335} 337}
336CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock", 338CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c
index 0a74cf7a7725..1c8ab4f834ba 100644
--- a/drivers/clk/mvebu/mv98dx3236.c
+++ b/drivers/clk/mvebu/mv98dx3236.c
@@ -172,7 +172,9 @@ static void __init mv98dx3236_clk_init(struct device_node *np)
172 172
173 mvebu_coreclk_setup(np, &mv98dx3236_core_clocks); 173 mvebu_coreclk_setup(np, &mv98dx3236_core_clocks);
174 174
175 if (cgnp) 175 if (cgnp) {
176 mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc); 176 mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc);
177 of_node_put(cgnp);
178 }
177} 179}
178CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init); 180CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init);
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 10e852518870..4d92b27a6153 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -21,7 +21,7 @@
21 21
22enum clk_ids { 22enum clk_ids {
23 /* Core Clock Outputs exported to DT */ 23 /* Core Clock Outputs exported to DT */
24 LAST_DT_CORE_CLK = R8A774A1_CLK_OSC, 24 LAST_DT_CORE_CLK = R8A774A1_CLK_CANFD,
25 25
26 /* External Input Clocks */ 26 /* External Input Clocks */
27 CLK_EXTAL, 27 CLK_EXTAL,
@@ -102,6 +102,7 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
102 DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1), 102 DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1),
103 DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1), 103 DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1),
104 104
105 DEF_DIV6P1("canfd", R8A774A1_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
105 DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), 106 DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
106 DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014), 107 DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014),
107 DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250), 108 DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
@@ -191,6 +192,7 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
191 DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4), 192 DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4),
192 DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4), 193 DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4),
193 DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4), 194 DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4),
195 DEF_MOD("can-fd", 914, R8A774A1_CLK_S3D2),
194 DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4), 196 DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4),
195 DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4), 197 DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4),
196 DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6), 198 DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 10b96895d452..34e274f2a273 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -22,7 +22,7 @@
22 22
23enum clk_ids { 23enum clk_ids {
24 /* Core Clock Outputs exported to DT */ 24 /* Core Clock Outputs exported to DT */
25 LAST_DT_CORE_CLK = R8A774C0_CLK_CPEX, 25 LAST_DT_CORE_CLK = R8A774C0_CLK_CANFD,
26 26
27 /* External Input Clocks */ 27 /* External Input Clocks */
28 CLK_EXTAL, 28 CLK_EXTAL,
@@ -33,6 +33,7 @@ enum clk_ids {
33 CLK_PLL1, 33 CLK_PLL1,
34 CLK_PLL3, 34 CLK_PLL3,
35 CLK_PLL0D4, 35 CLK_PLL0D4,
36 CLK_PLL0D6,
36 CLK_PLL0D8, 37 CLK_PLL0D8,
37 CLK_PLL0D20, 38 CLK_PLL0D20,
38 CLK_PLL0D24, 39 CLK_PLL0D24,
@@ -61,6 +62,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
61 62
62 DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100), 63 DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100),
63 DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1), 64 DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1),
65 DEF_FIXED(".pll0d6", CLK_PLL0D6, CLK_PLL0, 6, 1),
64 DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1), 66 DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1),
65 DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1), 67 DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1),
66 DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1), 68 DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1),
@@ -112,6 +114,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
112 DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2), 114 DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
113 DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4), 115 DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
114 116
117 DEF_DIV6P1("canfd", R8A774C0_CLK_CANFD, CLK_PLL0D6, 0x244),
115 DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c), 118 DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c),
116 DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014), 119 DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014),
117 120
@@ -119,6 +122,11 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
119}; 122};
120 123
121static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { 124static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
125 DEF_MOD("tmu4", 121, R8A774C0_CLK_S0D6C),
126 DEF_MOD("tmu3", 122, R8A774C0_CLK_S3D2C),
127 DEF_MOD("tmu2", 123, R8A774C0_CLK_S3D2C),
128 DEF_MOD("tmu1", 124, R8A774C0_CLK_S3D2C),
129 DEF_MOD("tmu0", 125, R8A774C0_CLK_CP),
122 DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C), 130 DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C),
123 DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C), 131 DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C),
124 DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C), 132 DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C),
@@ -172,8 +180,8 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
172 DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4), 180 DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4),
173 DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4), 181 DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4),
174 DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0), 182 DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0),
175 DEF_MOD("du1", 723, R8A774C0_CLK_S2D1), 183 DEF_MOD("du1", 723, R8A774C0_CLK_S1D1),
176 DEF_MOD("du0", 724, R8A774C0_CLK_S2D1), 184 DEF_MOD("du0", 724, R8A774C0_CLK_S1D1),
177 DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1), 185 DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1),
178 186
179 DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2), 187 DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2),
@@ -187,6 +195,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
187 DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4), 195 DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4),
188 DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4), 196 DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4),
189 DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4), 197 DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4),
198 DEF_MOD("can-fd", 914, R8A774C0_CLK_S3D2),
190 DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4), 199 DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4),
191 DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4), 200 DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4),
192 DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2), 201 DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index 25a3083b6764..f9e07fcc0d96 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -41,6 +41,7 @@ enum clk_ids {
41 CLK_S2, 41 CLK_S2,
42 CLK_S3, 42 CLK_S3,
43 CLK_SDSRC, 43 CLK_SDSRC,
44 CLK_RPCSRC,
44 CLK_OCO, 45 CLK_OCO,
45 46
46 /* Module Clocks */ 47 /* Module Clocks */
@@ -65,8 +66,14 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
65 DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), 66 DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
66 DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), 67 DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
67 DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), 68 DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
69 DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
68 DEF_RATE(".oco", CLK_OCO, 32768), 70 DEF_RATE(".oco", CLK_OCO, 32768),
69 71
72 DEF_BASE("rpc", R8A77980_CLK_RPC, CLK_TYPE_GEN3_RPC,
73 CLK_RPCSRC),
74 DEF_BASE("rpcd2", R8A77980_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
75 R8A77980_CLK_RPC),
76
70 /* Core Clock Outputs */ 77 /* Core Clock Outputs */
71 DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), 78 DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
72 DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), 79 DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
@@ -164,6 +171,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
164 DEF_MOD("gpio1", 911, R8A77980_CLK_CP), 171 DEF_MOD("gpio1", 911, R8A77980_CLK_CP),
165 DEF_MOD("gpio0", 912, R8A77980_CLK_CP), 172 DEF_MOD("gpio0", 912, R8A77980_CLK_CP),
166 DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2), 173 DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2),
174 DEF_MOD("rpc-if", 917, R8A77980_CLK_RPC),
167 DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6), 175 DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6),
168 DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6), 176 DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6),
169 DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2), 177 DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2),
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index be2ccbd6d623..9a8071a8114d 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -30,6 +30,21 @@
30 30
31#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */ 31#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */
32 32
33static spinlock_t cpg_lock;
34
35static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
36{
37 unsigned long flags;
38 u32 val;
39
40 spin_lock_irqsave(&cpg_lock, flags);
41 val = readl(reg);
42 val &= ~clear;
43 val |= set;
44 writel(val, reg);
45 spin_unlock_irqrestore(&cpg_lock, flags);
46};
47
33struct cpg_simple_notifier { 48struct cpg_simple_notifier {
34 struct notifier_block nb; 49 struct notifier_block nb;
35 void __iomem *reg; 50 void __iomem *reg;
@@ -118,7 +133,6 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
118 struct cpg_z_clk *zclk = to_z_clk(hw); 133 struct cpg_z_clk *zclk = to_z_clk(hw);
119 unsigned int mult; 134 unsigned int mult;
120 unsigned int i; 135 unsigned int i;
121 u32 val, kick;
122 136
123 /* Factor of 2 is for fixed divider */ 137 /* Factor of 2 is for fixed divider */
124 mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate); 138 mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate);
@@ -127,17 +141,14 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
127 if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK) 141 if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
128 return -EBUSY; 142 return -EBUSY;
129 143
130 val = readl(zclk->reg) & ~zclk->mask; 144 cpg_reg_modify(zclk->reg, zclk->mask,
131 val |= ((32 - mult) << __ffs(zclk->mask)) & zclk->mask; 145 ((32 - mult) << __ffs(zclk->mask)) & zclk->mask);
132 writel(val, zclk->reg);
133 146
134 /* 147 /*
135 * Set KICK bit in FRQCRB to update hardware setting and wait for 148 * Set KICK bit in FRQCRB to update hardware setting and wait for
136 * clock change completion. 149 * clock change completion.
137 */ 150 */
138 kick = readl(zclk->kick_reg); 151 cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
139 kick |= CPG_FRQCRB_KICK;
140 writel(kick, zclk->kick_reg);
141 152
142 /* 153 /*
143 * Note: There is no HW information about the worst case latency. 154 * Note: There is no HW information about the worst case latency.
@@ -266,12 +277,10 @@ static const struct sd_div_table cpg_sd_div_table[] = {
266static int cpg_sd_clock_enable(struct clk_hw *hw) 277static int cpg_sd_clock_enable(struct clk_hw *hw)
267{ 278{
268 struct sd_clock *clock = to_sd_clock(hw); 279 struct sd_clock *clock = to_sd_clock(hw);
269 u32 val = readl(clock->csn.reg);
270
271 val &= ~(CPG_SD_STP_MASK);
272 val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK;
273 280
274 writel(val, clock->csn.reg); 281 cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
282 clock->div_table[clock->cur_div_idx].val &
283 CPG_SD_STP_MASK);
275 284
276 return 0; 285 return 0;
277} 286}
@@ -280,7 +289,7 @@ static void cpg_sd_clock_disable(struct clk_hw *hw)
280{ 289{
281 struct sd_clock *clock = to_sd_clock(hw); 290 struct sd_clock *clock = to_sd_clock(hw);
282 291
283 writel(readl(clock->csn.reg) | CPG_SD_STP_MASK, clock->csn.reg); 292 cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
284} 293}
285 294
286static int cpg_sd_clock_is_enabled(struct clk_hw *hw) 295static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
@@ -327,7 +336,6 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
327{ 336{
328 struct sd_clock *clock = to_sd_clock(hw); 337 struct sd_clock *clock = to_sd_clock(hw);
329 unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate); 338 unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
330 u32 val;
331 unsigned int i; 339 unsigned int i;
332 340
333 for (i = 0; i < clock->div_num; i++) 341 for (i = 0; i < clock->div_num; i++)
@@ -339,10 +347,9 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
339 347
340 clock->cur_div_idx = i; 348 clock->cur_div_idx = i;
341 349
342 val = readl(clock->csn.reg); 350 cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
343 val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK); 351 clock->div_table[i].val &
344 val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK); 352 (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
345 writel(val, clock->csn.reg);
346 353
347 return 0; 354 return 0;
348} 355}
@@ -415,6 +422,92 @@ free_clock:
415 return clk; 422 return clk;
416} 423}
417 424
425struct rpc_clock {
426 struct clk_divider div;
427 struct clk_gate gate;
428 /*
429 * One notifier covers both RPC and RPCD2 clocks as they are both
430 * controlled by the same RPCCKCR register...
431 */
432 struct cpg_simple_notifier csn;
433};
434
435static const struct clk_div_table cpg_rpcsrc_div_table[] = {
436 { 2, 5 }, { 3, 6 }, { 0, 0 },
437};
438
439static const struct clk_div_table cpg_rpc_div_table[] = {
440 { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
441};
442
443static struct clk * __init cpg_rpc_clk_register(const char *name,
444 void __iomem *base, const char *parent_name,
445 struct raw_notifier_head *notifiers)
446{
447 struct rpc_clock *rpc;
448 struct clk *clk;
449
450 rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
451 if (!rpc)
452 return ERR_PTR(-ENOMEM);
453
454 rpc->div.reg = base + CPG_RPCCKCR;
455 rpc->div.width = 3;
456 rpc->div.table = cpg_rpc_div_table;
457 rpc->div.lock = &cpg_lock;
458
459 rpc->gate.reg = base + CPG_RPCCKCR;
460 rpc->gate.bit_idx = 8;
461 rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
462 rpc->gate.lock = &cpg_lock;
463
464 rpc->csn.reg = base + CPG_RPCCKCR;
465
466 clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
467 &rpc->div.hw, &clk_divider_ops,
468 &rpc->gate.hw, &clk_gate_ops, 0);
469 if (IS_ERR(clk)) {
470 kfree(rpc);
471 return clk;
472 }
473
474 cpg_simple_notifier_register(notifiers, &rpc->csn);
475 return clk;
476}
477
478struct rpcd2_clock {
479 struct clk_fixed_factor fixed;
480 struct clk_gate gate;
481};
482
483static struct clk * __init cpg_rpcd2_clk_register(const char *name,
484 void __iomem *base,
485 const char *parent_name)
486{
487 struct rpcd2_clock *rpcd2;
488 struct clk *clk;
489
490 rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
491 if (!rpcd2)
492 return ERR_PTR(-ENOMEM);
493
494 rpcd2->fixed.mult = 1;
495 rpcd2->fixed.div = 2;
496
497 rpcd2->gate.reg = base + CPG_RPCCKCR;
498 rpcd2->gate.bit_idx = 9;
499 rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
500 rpcd2->gate.lock = &cpg_lock;
501
502 clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
503 &rpcd2->fixed.hw, &clk_fixed_factor_ops,
504 &rpcd2->gate.hw, &clk_gate_ops, 0);
505 if (IS_ERR(clk))
506 kfree(rpcd2);
507
508 return clk;
509}
510
418 511
419static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata; 512static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
420static unsigned int cpg_clk_extalr __initdata; 513static unsigned int cpg_clk_extalr __initdata;
@@ -593,6 +686,21 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
593 } 686 }
594 break; 687 break;
595 688
689 case CLK_TYPE_GEN3_RPCSRC:
690 return clk_register_divider_table(NULL, core->name,
691 __clk_get_name(parent), 0,
692 base + CPG_RPCCKCR, 3, 2, 0,
693 cpg_rpcsrc_div_table,
694 &cpg_lock);
695
696 case CLK_TYPE_GEN3_RPC:
697 return cpg_rpc_clk_register(core->name, base,
698 __clk_get_name(parent), notifiers);
699
700 case CLK_TYPE_GEN3_RPCD2:
701 return cpg_rpcd2_clk_register(core->name, base,
702 __clk_get_name(parent));
703
596 default: 704 default:
597 return ERR_PTR(-EINVAL); 705 return ERR_PTR(-EINVAL);
598 } 706 }
@@ -613,5 +721,8 @@ int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
613 if (attr) 721 if (attr)
614 cpg_quirks = (uintptr_t)attr->data; 722 cpg_quirks = (uintptr_t)attr->data;
615 pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks); 723 pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks);
724
725 spin_lock_init(&cpg_lock);
726
616 return 0; 727 return 0;
617} 728}
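For reference, a minimal sketch of the locked read-modify-write helper the rcar-gen3-cpg.c hunks above converge on; the helper shape and lock mirror the diff, while the caller and its register are hypothetical.

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_cpg_lock);

/* Clear @clear and set @set in @reg under one lock, as cpg_reg_modify() does. */
static void example_reg_modify(void __iomem *reg, u32 clear, u32 set)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&example_cpg_lock, flags);
	val = readl(reg);
	val &= ~clear;
	val |= set;
	writel(val, reg);
	spin_unlock_irqrestore(&example_cpg_lock, flags);
}

/* Hypothetical caller: stop an SD clock by setting its STP bits atomically. */
static void example_sd_clock_stop(void __iomem *csn_reg, u32 stp_mask)
{
	example_reg_modify(csn_reg, 0, stp_mask);
}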
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index f4fb6cf16688..eac1b057455a 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -23,6 +23,9 @@ enum rcar_gen3_clk_types {
23 CLK_TYPE_GEN3_Z2, 23 CLK_TYPE_GEN3_Z2,
24 CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */ 24 CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
25 CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */ 25 CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
26 CLK_TYPE_GEN3_RPCSRC,
27 CLK_TYPE_GEN3_RPC,
28 CLK_TYPE_GEN3_RPCD2,
26 29
27 /* SoC specific definitions start here */ 30 /* SoC specific definitions start here */
28 CLK_TYPE_GEN3_SOC_BASE, 31 CLK_TYPE_GEN3_SOC_BASE,
@@ -57,6 +60,7 @@ struct rcar_gen3_cpg_pll_config {
57 u8 osc_prediv; 60 u8 osc_prediv;
58}; 61};
59 62
63#define CPG_RPCCKCR 0x238
60#define CPG_RCKCR 0x240 64#define CPG_RCKCR 0x240
61 65
62struct clk *rcar_gen3_cpg_clk_register(struct device *dev, 66struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 59d4d46667ce..54066e6508d3 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1028,6 +1028,7 @@ static unsigned long __init exynos4_get_xom(void)
1028 xom = readl(chipid_base + 8); 1028 xom = readl(chipid_base + 8);
1029 1029
1030 iounmap(chipid_base); 1030 iounmap(chipid_base);
1031 of_node_put(np);
1031 } 1032 }
1032 1033
1033 return xom; 1034 return xom;
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 93306283d764..8ae44b5db4c2 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent,
136{ 136{
137 struct of_phandle_args genpdspec = { .np = pd_node }; 137 struct of_phandle_args genpdspec = { .np = pd_node };
138 struct platform_device *pdev; 138 struct platform_device *pdev;
139 int ret;
140
141 pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO);
142 if (!pdev)
143 return -ENOMEM;
139 144
140 pdev = platform_device_alloc(info->pd_name, -1);
141 pdev->dev.parent = parent; 145 pdev->dev.parent = parent;
142 pdev->driver_override = "exynos5-subcmu";
143 platform_set_drvdata(pdev, (void *)info); 146 platform_set_drvdata(pdev, (void *)info);
144 of_genpd_add_device(&genpdspec, &pdev->dev); 147 of_genpd_add_device(&genpdspec, &pdev->dev);
145 platform_device_add(pdev); 148 ret = platform_device_add(pdev);
149 if (ret)
150 platform_device_put(pdev);
146 151
147 return 0; 152 return ret;
148} 153}
149 154
150static int __init exynos5_clk_probe(struct platform_device *pdev) 155static int __init exynos5_clk_probe(struct platform_device *pdev)
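The subcmu hunk above switches to PLATFORM_DEVID_AUTO and adds error handling around platform_device_add(); a hedged sketch of that allocate/add/put pattern, with a made-up device name.

#include <linux/platform_device.h>

static int example_register_subdev(struct device *parent, void *info)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-subcmu", PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	pdev->dev.parent = parent;
	platform_set_drvdata(pdev, info);

	ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);	/* drop the reference taken by alloc */

	return ret;
}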
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 751e2c4fb65b..dae1c96de933 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -559,7 +559,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
559 /* ENABLE_ACLK_TOP */ 559 /* ENABLE_ACLK_TOP */
560 GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400", 560 GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400",
561 ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0), 561 ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0),
562 GATE(CLK_ACLK_IMEM_SSX_266, "aclk_imem_ssx_266", 562 GATE(CLK_ACLK_IMEM_SSSX_266, "aclk_imem_sssx_266",
563 "div_aclk_imem_sssx_266", ENABLE_ACLK_TOP, 563 "div_aclk_imem_sssx_266", ENABLE_ACLK_TOP,
564 29, CLK_IGNORE_UNUSED, 0), 564 29, CLK_IGNORE_UNUSED, 0),
565 GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400", 565 GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400",
@@ -568,10 +568,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
568 GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400", 568 GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400",
569 ENABLE_ACLK_TOP, 25, 569 ENABLE_ACLK_TOP, 25,
570 CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), 570 CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
571 GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_266", 571 GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_200",
572 ENABLE_ACLK_TOP, 24, 572 ENABLE_ACLK_TOP, 24,
573 CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), 573 CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
574 GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_200", 574 GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_266",
575 ENABLE_ACLK_TOP, 23, 575 ENABLE_ACLK_TOP, 23,
576 CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), 576 CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
577 GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b", 577 GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b",
@@ -5467,6 +5467,35 @@ static const struct samsung_cmu_info cam1_cmu_info __initconst = {
5467 .clk_name = "aclk_cam1_400", 5467 .clk_name = "aclk_cam1_400",
5468}; 5468};
5469 5469
5470/*
5471 * Register offset definitions for CMU_IMEM
5472 */
5473#define ENABLE_ACLK_IMEM_SLIMSSS 0x080c
5474#define ENABLE_PCLK_IMEM_SLIMSSS 0x0908
5475
5476static const unsigned long imem_clk_regs[] __initconst = {
5477 ENABLE_ACLK_IMEM_SLIMSSS,
5478 ENABLE_PCLK_IMEM_SLIMSSS,
5479};
5480
5481static const struct samsung_gate_clock imem_gate_clks[] __initconst = {
5482 /* ENABLE_ACLK_IMEM_SLIMSSS */
5483 GATE(CLK_ACLK_SLIMSSS, "aclk_slimsss", "aclk_imem_sssx_266",
5484 ENABLE_ACLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0),
5485
5486 /* ENABLE_PCLK_IMEM_SLIMSSS */
5487 GATE(CLK_PCLK_SLIMSSS, "pclk_slimsss", "aclk_imem_200",
5488 ENABLE_PCLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0),
5489};
5490
5491static const struct samsung_cmu_info imem_cmu_info __initconst = {
5492 .gate_clks = imem_gate_clks,
5493 .nr_gate_clks = ARRAY_SIZE(imem_gate_clks),
5494 .nr_clk_ids = IMEM_NR_CLK,
5495 .clk_regs = imem_clk_regs,
5496 .nr_clk_regs = ARRAY_SIZE(imem_clk_regs),
5497 .clk_name = "aclk_imem_200",
5498};
5470 5499
5471struct exynos5433_cmu_data { 5500struct exynos5433_cmu_data {
5472 struct samsung_clk_reg_dump *clk_save; 5501 struct samsung_clk_reg_dump *clk_save;
@@ -5655,6 +5684,9 @@ static const struct of_device_id exynos5433_cmu_of_match[] = {
5655 .compatible = "samsung,exynos5433-cmu-mscl", 5684 .compatible = "samsung,exynos5433-cmu-mscl",
5656 .data = &mscl_cmu_info, 5685 .data = &mscl_cmu_info,
5657 }, { 5686 }, {
5687 .compatible = "samsung,exynos5433-cmu-imem",
5688 .data = &imem_cmu_info,
5689 }, {
5658 }, 5690 },
5659}; 5691};
5660 5692
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 884067e4f1a1..f38f0e24e3b6 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -389,7 +389,7 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
389 ARRAY_SIZE(s3c2450_gates)); 389 ARRAY_SIZE(s3c2450_gates));
390 samsung_clk_register_alias(ctx, s3c2450_aliases, 390 samsung_clk_register_alias(ctx, s3c2450_aliases,
391 ARRAY_SIZE(s3c2450_aliases)); 391 ARRAY_SIZE(s3c2450_aliases));
392 /* fall through, as s3c2450 extends the s3c2416 clocks */ 392 /* fall through - as s3c2450 extends the s3c2416 clocks */
393 case S3C2416: 393 case S3C2416:
394 samsung_clk_register_div(ctx, s3c2416_dividers, 394 samsung_clk_register_div(ctx, s3c2416_dividers,
395 ARRAY_SIZE(s3c2416_dividers)); 395 ARRAY_SIZE(s3c2416_dividers));
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index aa7a6e6a15b6..73e03328d5c5 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -176,8 +176,7 @@ static struct clk_ops gateclk_ops = {
176 .set_parent = socfpga_clk_set_parent, 176 .set_parent = socfpga_clk_set_parent,
177}; 177};
178 178
179static void __init __socfpga_gate_init(struct device_node *node, 179void __init socfpga_gate_init(struct device_node *node)
180 const struct clk_ops *ops)
181{ 180{
182 u32 clk_gate[2]; 181 u32 clk_gate[2];
183 u32 div_reg[3]; 182 u32 div_reg[3];
@@ -188,12 +187,17 @@ static void __init __socfpga_gate_init(struct device_node *node,
188 const char *clk_name = node->name; 187 const char *clk_name = node->name;
189 const char *parent_name[SOCFPGA_MAX_PARENTS]; 188 const char *parent_name[SOCFPGA_MAX_PARENTS];
190 struct clk_init_data init; 189 struct clk_init_data init;
190 struct clk_ops *ops;
191 int rc; 191 int rc;
192 192
193 socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); 193 socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
194 if (WARN_ON(!socfpga_clk)) 194 if (WARN_ON(!socfpga_clk))
195 return; 195 return;
196 196
197 ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL);
198 if (WARN_ON(!ops))
199 return;
200
197 rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); 201 rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
198 if (rc) 202 if (rc)
199 clk_gate[0] = 0; 203 clk_gate[0] = 0;
@@ -202,8 +206,8 @@ static void __init __socfpga_gate_init(struct device_node *node,
202 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0]; 206 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
203 socfpga_clk->hw.bit_idx = clk_gate[1]; 207 socfpga_clk->hw.bit_idx = clk_gate[1];
204 208
205 gateclk_ops.enable = clk_gate_ops.enable; 209 ops->enable = clk_gate_ops.enable;
206 gateclk_ops.disable = clk_gate_ops.disable; 210 ops->disable = clk_gate_ops.disable;
207 } 211 }
208 212
209 rc = of_property_read_u32(node, "fixed-divider", &fixed_div); 213 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
@@ -234,6 +238,11 @@ static void __init __socfpga_gate_init(struct device_node *node,
234 init.flags = 0; 238 init.flags = 0;
235 239
236 init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS); 240 init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
241 if (init.num_parents < 2) {
242 ops->get_parent = NULL;
243 ops->set_parent = NULL;
244 }
245
237 init.parent_names = parent_name; 246 init.parent_names = parent_name;
238 socfpga_clk->hw.hw.init = &init; 247 socfpga_clk->hw.hw.init = &init;
239 248
@@ -246,8 +255,3 @@ static void __init __socfpga_gate_init(struct device_node *node,
246 if (WARN_ON(rc)) 255 if (WARN_ON(rc))
247 return; 256 return;
248} 257}
249
250void __init socfpga_gate_init(struct device_node *node)
251{
252 __socfpga_gate_init(node, &gateclk_ops);
253}
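The socfpga change above stops patching a shared static clk_ops and instead kmemdup()s it per clock, so a single-parent instance can drop its parent ops without affecting every other gate. A minimal sketch of that idea; the template is caller-supplied and the helper name is hypothetical.

#include <linux/clk-provider.h>
#include <linux/slab.h>

static struct clk_ops *example_dup_ops(const struct clk_ops *template,
					unsigned int num_parents)
{
	struct clk_ops *ops;

	ops = kmemdup(template, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return NULL;

	/* A clock with a single parent has nothing to (re)parent. */
	if (num_parents < 2) {
		ops->get_parent = NULL;
		ops->set_parent = NULL;
	}

	return ops;
}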
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 35fabe1a32c3..269467e8e07e 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -95,6 +95,7 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
95 95
96 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); 96 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
97 clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0); 97 clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0);
98 of_node_put(clkmgr_np);
98 BUG_ON(!clk_mgr_a10_base_addr); 99 BUG_ON(!clk_mgr_a10_base_addr);
99 pll_clk->hw.reg = clk_mgr_a10_base_addr + reg; 100 pll_clk->hw.reg = clk_mgr_a10_base_addr + reg;
100 101
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index c7f463172e4b..b4b44e9b5901 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -100,6 +100,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
100 100
101 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); 101 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
102 clk_mgr_base_addr = of_iomap(clkmgr_np, 0); 102 clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
103 of_node_put(clkmgr_np);
103 BUG_ON(!clk_mgr_base_addr); 104 BUG_ON(!clk_mgr_base_addr);
104 pll_clk->hw.reg = clk_mgr_base_addr + reg; 105 pll_clk->hw.reg = clk_mgr_base_addr + reg;
105 106
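Several hunks above (clk-exynos4.c, clk-pll-a10.c, clk-pll.c, and ti/clk.c below) add the same of_node_put() after of_find_compatible_node(); a hedged sketch of the balanced lookup, reusing the socfpga compatible string from the diff.

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *example_map_clk_mgr(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
	if (!np)
		return NULL;

	base = of_iomap(np, 0);
	of_node_put(np);	/* balance the reference taken by the lookup */

	return base;
}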
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 3b97f60540ad..609970c0b666 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1",
264static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1", 264static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1",
265 0x060, BIT(10), 0); 265 0x060, BIT(10), 0);
266static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1", 266static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1",
267 0x060, BIT(12), 0); 267 0x060, BIT(11), 0);
268static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1", 268static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1",
269 0x060, BIT(13), 0); 269 0x060, BIT(12), 0);
270static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1", 270static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1",
271 0x060, BIT(13), 0); 271 0x060, BIT(13), 0);
272static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1", 272static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index a4fa2945f230..4b5f8f4e4ab8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -144,7 +144,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
144 8, 4, /* N */ 144 8, 4, /* N */
145 4, 2, /* K */ 145 4, 2, /* K */
146 0, 4, /* M */ 146 0, 4, /* M */
147 BIT(31), /* gate */ 147 BIT(31) | BIT(23) | BIT(22), /* gate */
148 BIT(28), /* lock */ 148 BIT(28), /* lock */
149 CLK_SET_RATE_UNGATE); 149 CLK_SET_RATE_UNGATE);
150 150
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 621b1cd996db..ac12f261f8ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, 542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
543 543
544 [RST_BUS_VE] = { 0x2c4, BIT(0) }, 544 [RST_BUS_VE] = { 0x2c4, BIT(0) },
545 [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, 545 [RST_BUS_TCON0] = { 0x2c4, BIT(4) },
546 [RST_BUS_CSI] = { 0x2c4, BIT(8) }, 546 [RST_BUS_CSI] = { 0x2c4, BIT(8) },
547 [RST_BUS_DE] = { 0x2c4, BIT(12) }, 547 [RST_BUS_DE] = { 0x2c4, BIT(12) },
548 [RST_BUS_DBG] = { 0x2c4, BIT(31) }, 548 [RST_BUS_DBG] = { 0x2c4, BIT(31) },
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 688e403333b9..0c210984765a 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -614,7 +614,7 @@ static int ti_adpll_init_clkout(struct ti_adpll_data *d,
614 614
615 init.name = child_name; 615 init.name = child_name;
616 init.ops = ops; 616 init.ops = ops;
617 init.flags = CLK_IS_BASIC; 617 init.flags = 0;
618 co->hw.init = &init; 618 co->hw.init = &init;
619 parent_names[0] = __clk_get_name(clk0); 619 parent_names[0] = __clk_get_name(clk0);
620 parent_names[1] = __clk_get_name(clk1); 620 parent_names[1] = __clk_get_name(clk1);
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 222f68bc3f2a..015a657d3382 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -165,7 +165,7 @@ static void __init omap_clk_register_apll(void *user,
165 165
166 ad->clk_bypass = __clk_get_hw(clk); 166 ad->clk_bypass = __clk_get_hw(clk);
167 167
168 clk = ti_clk_register(NULL, &clk_hw->hw, node->name); 168 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
169 if (!IS_ERR(clk)) { 169 if (!IS_ERR(clk)) {
170 of_clk_add_provider(node, of_clk_src_simple_get, clk); 170 of_clk_add_provider(node, of_clk_src_simple_get, clk);
171 kfree(clk_hw->hw.init->parent_names); 171 kfree(clk_hw->hw.init->parent_names);
@@ -402,7 +402,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
402 if (ret) 402 if (ret)
403 goto cleanup; 403 goto cleanup;
404 404
405 clk = clk_register(NULL, &clk_hw->hw); 405 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
406 if (!IS_ERR(clk)) { 406 if (!IS_ERR(clk)) {
407 of_clk_add_provider(node, of_clk_src_simple_get, clk); 407 of_clk_add_provider(node, of_clk_src_simple_get, clk);
408 kfree(init); 408 kfree(init);
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index 7bb9afbe4058..1cae226759dd 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -35,7 +35,44 @@ struct clk_ti_autoidle {
35#define AUTOIDLE_LOW 0x1 35#define AUTOIDLE_LOW 0x1
36 36
37static LIST_HEAD(autoidle_clks); 37static LIST_HEAD(autoidle_clks);
38static LIST_HEAD(clk_hw_omap_clocks); 38
39/*
 40 * We have some non-atomic read/write
 41 * operations behind it, so let's
42 * take one lock for handling autoidle
43 * of all clocks
44 */
45static DEFINE_SPINLOCK(autoidle_spinlock);
46
47static int _omap2_clk_deny_idle(struct clk_hw_omap *clk)
48{
49 if (clk->ops && clk->ops->deny_idle) {
50 unsigned long irqflags;
51
52 spin_lock_irqsave(&autoidle_spinlock, irqflags);
53 clk->autoidle_count++;
54 if (clk->autoidle_count == 1)
55 clk->ops->deny_idle(clk);
56
57 spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
58 }
59 return 0;
60}
61
62static int _omap2_clk_allow_idle(struct clk_hw_omap *clk)
63{
64 if (clk->ops && clk->ops->allow_idle) {
65 unsigned long irqflags;
66
67 spin_lock_irqsave(&autoidle_spinlock, irqflags);
68 clk->autoidle_count--;
69 if (clk->autoidle_count == 0)
70 clk->ops->allow_idle(clk);
71
72 spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
73 }
74 return 0;
75}
39 76
40/** 77/**
41 * omap2_clk_deny_idle - disable autoidle on an OMAP clock 78 * omap2_clk_deny_idle - disable autoidle on an OMAP clock
@@ -45,12 +82,15 @@ static LIST_HEAD(clk_hw_omap_clocks);
45 */ 82 */
46int omap2_clk_deny_idle(struct clk *clk) 83int omap2_clk_deny_idle(struct clk *clk)
47{ 84{
48 struct clk_hw_omap *c; 85 struct clk_hw *hw = __clk_get_hw(clk);
49 86
50 c = to_clk_hw_omap(__clk_get_hw(clk)); 87 if (omap2_clk_is_hw_omap(hw)) {
51 if (c->ops && c->ops->deny_idle) 88 struct clk_hw_omap *c = to_clk_hw_omap(hw);
52 c->ops->deny_idle(c); 89
53 return 0; 90 return _omap2_clk_deny_idle(c);
91 }
92
93 return -EINVAL;
54} 94}
55 95
56/** 96/**
@@ -61,12 +101,15 @@ int omap2_clk_deny_idle(struct clk *clk)
61 */ 101 */
62int omap2_clk_allow_idle(struct clk *clk) 102int omap2_clk_allow_idle(struct clk *clk)
63{ 103{
64 struct clk_hw_omap *c; 104 struct clk_hw *hw = __clk_get_hw(clk);
65 105
66 c = to_clk_hw_omap(__clk_get_hw(clk)); 106 if (omap2_clk_is_hw_omap(hw)) {
67 if (c->ops && c->ops->allow_idle) 107 struct clk_hw_omap *c = to_clk_hw_omap(hw);
68 c->ops->allow_idle(c); 108
69 return 0; 109 return _omap2_clk_allow_idle(c);
110 }
111
112 return -EINVAL;
70} 113}
71 114
72static void _allow_autoidle(struct clk_ti_autoidle *clk) 115static void _allow_autoidle(struct clk_ti_autoidle *clk)
@@ -168,26 +211,6 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node)
168} 211}
169 212
170/** 213/**
171 * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
172 * @hw: struct clk_hw * to initialize
173 *
174 * Add an OMAP clock @clk to the internal list of OMAP clocks. Used
175 * temporarily for autoidle handling, until this support can be
176 * integrated into the common clock framework code in some way. No
177 * return value.
178 */
179void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
180{
181 struct clk_hw_omap *c;
182
183 if (clk_hw_get_flags(hw) & CLK_IS_BASIC)
184 return;
185
186 c = to_clk_hw_omap(hw);
187 list_add(&c->node, &clk_hw_omap_clocks);
188}
189
190/**
191 * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that 214 * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
192 * support it 215 * support it
193 * 216 *
@@ -198,11 +221,11 @@ void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
198 */ 221 */
199int omap2_clk_enable_autoidle_all(void) 222int omap2_clk_enable_autoidle_all(void)
200{ 223{
201 struct clk_hw_omap *c; 224 int ret;
202 225
203 list_for_each_entry(c, &clk_hw_omap_clocks, node) 226 ret = omap2_clk_for_each(_omap2_clk_allow_idle);
204 if (c->ops && c->ops->allow_idle) 227 if (ret)
205 c->ops->allow_idle(c); 228 return ret;
206 229
207 _clk_generic_allow_autoidle_all(); 230 _clk_generic_allow_autoidle_all();
208 231
@@ -220,11 +243,11 @@ int omap2_clk_enable_autoidle_all(void)
220 */ 243 */
221int omap2_clk_disable_autoidle_all(void) 244int omap2_clk_disable_autoidle_all(void)
222{ 245{
223 struct clk_hw_omap *c; 246 int ret;
224 247
225 list_for_each_entry(c, &clk_hw_omap_clocks, node) 248 ret = omap2_clk_for_each(_omap2_clk_deny_idle);
226 if (c->ops && c->ops->deny_idle) 249 if (ret)
227 c->ops->deny_idle(c); 250 return ret;
228 251
229 _clk_generic_deny_autoidle_all(); 252 _clk_generic_deny_autoidle_all();
230 253
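The autoidle rework above turns deny/allow into a counted operation so nested callers are safe; an illustrative sketch of the counting semantics under one spinlock (the struct and hooks are hypothetical).

#include <linux/spinlock.h>

struct example_autoidle_clk {
	unsigned int autoidle_count;
	void (*deny_idle)(struct example_autoidle_clk *clk);
	void (*allow_idle)(struct example_autoidle_clk *clk);
};

static DEFINE_SPINLOCK(example_autoidle_lock);

static void example_deny_idle(struct example_autoidle_clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&example_autoidle_lock, flags);
	if (++clk->autoidle_count == 1)		/* first denier disables autoidle */
		clk->deny_idle(clk);
	spin_unlock_irqrestore(&example_autoidle_lock, flags);
}

static void example_allow_idle(struct example_autoidle_clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&example_autoidle_lock, flags);
	if (--clk->autoidle_count == 0)		/* last denier re-enables it */
		clk->allow_idle(clk);
	spin_unlock_irqrestore(&example_autoidle_lock, flags);
}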
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index d0cd58534781..ff164a33f67d 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -31,6 +31,7 @@
31#undef pr_fmt 31#undef pr_fmt
32#define pr_fmt(fmt) "%s: " fmt, __func__ 32#define pr_fmt(fmt) "%s: " fmt, __func__
33 33
34static LIST_HEAD(clk_hw_omap_clocks);
34struct ti_clk_ll_ops *ti_clk_ll_ops; 35struct ti_clk_ll_ops *ti_clk_ll_ops;
35static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS]; 36static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];
36 37
@@ -191,9 +192,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
191 clkdev_add(&c->lk); 192 clkdev_add(&c->lk);
192 } else { 193 } else {
193 if (num_args && !has_clkctrl_data) { 194 if (num_args && !has_clkctrl_data) {
194 if (of_find_compatible_node(NULL, NULL, 195 struct device_node *np;
195 "ti,clkctrl")) { 196
197 np = of_find_compatible_node(NULL, NULL,
198 "ti,clkctrl");
199 if (np) {
196 has_clkctrl_data = true; 200 has_clkctrl_data = true;
201 of_node_put(np);
197 } else { 202 } else {
198 clkctrl_nodes_missing = true; 203 clkctrl_nodes_missing = true;
199 204
@@ -517,3 +522,74 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
517 522
518 return clk; 523 return clk;
519} 524}
525
526/**
527 * ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
528 * @dev: device for this clock
529 * @hw: hardware clock handle
530 * @con: connection ID for this clock
531 *
 532 * Registers a clk_hw_omap clock to the clock framework, adds a clock alias
 533 * for it, and adds the clock to the list of available clk_hw_omap type clocks.
 534 * Returns a handle to the registered clock if successful, ERR_PTR value
 535 * on failure.
536 */
537struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
538 const char *con)
539{
540 struct clk *clk;
541 struct clk_hw_omap *oclk;
542
543 clk = ti_clk_register(dev, hw, con);
544 if (IS_ERR(clk))
545 return clk;
546
547 oclk = to_clk_hw_omap(hw);
548
549 list_add(&oclk->node, &clk_hw_omap_clocks);
550
551 return clk;
552}
553
554/**
555 * omap2_clk_for_each - call function for each registered clk_hw_omap
556 * @fn: pointer to a callback function
557 *
 558 * Call @fn for each registered clk_hw_omap, passing each clock as the
 559 * @hw argument. @fn must return 0 for success or any other value for
560 * failure. If @fn returns non-zero, the iteration across clocks
561 * will stop and the non-zero return value will be passed to the
562 * caller of omap2_clk_for_each().
563 */
564int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw))
565{
566 int ret;
567 struct clk_hw_omap *hw;
568
569 list_for_each_entry(hw, &clk_hw_omap_clocks, node) {
570 ret = (*fn)(hw);
571 if (ret)
572 break;
573 }
574
575 return ret;
576}
577
578/**
 579 * omap2_clk_is_hw_omap - check if the provided clk_hw is an OMAP clock
580 * @hw: clk_hw to check if it is an omap clock or not
581 *
 582 * Checks if the provided clk_hw is an OMAP clock or not. Returns true if
583 * it is, false otherwise.
584 */
585bool omap2_clk_is_hw_omap(struct clk_hw *hw)
586{
587 struct clk_hw_omap *oclk;
588
589 list_for_each_entry(oclk, &clk_hw_omap_clocks, node) {
590 if (&oclk->hw == hw)
591 return true;
592 }
593
594 return false;
595}
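Per the kernel-doc above, omap2_clk_for_each() walks every clock registered through ti_clk_register_omap_hw() and stops on the first non-zero return. A hedged usage sketch; the callback is hypothetical and the iterator prototype lives in the driver-local clock.h shown below.

#include <linux/clk-provider.h>
#include <linux/printk.h>
#include "clock.h"

static int example_print_one(struct clk_hw_omap *hw)
{
	pr_info("omap clk: %s\n", clk_hw_get_name(&hw->hw));
	return 0;	/* non-zero would abort the walk */
}

static int example_walk_omap_clocks(void)
{
	return omap2_clk_for_each(example_print_one);
}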
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 40630eb950fc..bf32d996177f 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -276,7 +276,7 @@ _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
276 init.parent_names = parents; 276 init.parent_names = parents;
277 init.num_parents = num_parents; 277 init.num_parents = num_parents;
278 init.ops = ops; 278 init.ops = ops;
279 init.flags = CLK_IS_BASIC; 279 init.flags = 0;
280 280
281 clk = ti_clk_register(NULL, clk_hw, init.name); 281 clk = ti_clk_register(NULL, clk_hw, init.name);
282 if (IS_ERR_OR_NULL(clk)) { 282 if (IS_ERR_OR_NULL(clk)) {
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 9f312a219510..1c0fac59d809 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -203,6 +203,8 @@ typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
203 203
204struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw, 204struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
205 const char *con); 205 const char *con);
206struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
207 const char *con);
206int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con); 208int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
207void ti_clk_add_aliases(void); 209void ti_clk_add_aliases(void);
208 210
@@ -221,7 +223,6 @@ int ti_clk_retry_init(struct device_node *node, void *user,
221 ti_of_clk_init_cb_t func); 223 ti_of_clk_init_cb_t func);
222int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type); 224int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
223 225
224void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw);
225int of_ti_clk_autoidle_setup(struct device_node *node); 226int of_ti_clk_autoidle_setup(struct device_node *node);
226void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); 227void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
227 228
@@ -301,6 +302,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
301 unsigned long *parent_rate); 302 unsigned long *parent_rate);
302int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, 303int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
303 struct clk_rate_request *req); 304 struct clk_rate_request *req);
305int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
306bool omap2_clk_is_hw_omap(struct clk_hw *hw);
304 307
305extern struct ti_clk_ll_ops *ti_clk_ll_ops; 308extern struct ti_clk_ll_ops *ti_clk_ll_ops;
306 309
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 07a805125e98..423a99b9f10c 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -143,7 +143,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
143 continue; 143 continue;
144 } 144 }
145 clk_hw = __clk_get_hw(clk); 145 clk_hw = __clk_get_hw(clk);
146 if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) { 146 if (!omap2_clk_is_hw_omap(clk_hw)) {
147 pr_warn("can't setup clkdm for basic clk %s\n", 147 pr_warn("can't setup clkdm for basic clk %s\n",
148 __clk_get_name(clk)); 148 __clk_get_name(clk));
149 continue; 149 continue;
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 0241450f3eb3..4786e0ebc2e8 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -336,7 +336,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
336 336
337 init.name = name; 337 init.name = name;
338 init.ops = &ti_clk_divider_ops; 338 init.ops = &ti_clk_divider_ops;
339 init.flags = flags | CLK_IS_BASIC; 339 init.flags = flags;
340 init.parent_names = (parent_name ? &parent_name : NULL); 340 init.parent_names = (parent_name ? &parent_name : NULL);
341 init.num_parents = (parent_name ? 1 : 0); 341 init.num_parents = (parent_name ? 1 : 0);
342 342
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 6c3329bc116f..659dadb23279 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -192,10 +192,9 @@ static void __init _register_dpll(void *user,
192 dd->clk_bypass = __clk_get_hw(clk); 192 dd->clk_bypass = __clk_get_hw(clk);
193 193
194 /* register the clock */ 194 /* register the clock */
195 clk = ti_clk_register(NULL, &clk_hw->hw, node->name); 195 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
196 196
197 if (!IS_ERR(clk)) { 197 if (!IS_ERR(clk)) {
198 omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
199 of_clk_add_provider(node, of_clk_src_simple_get, clk); 198 of_clk_add_provider(node, of_clk_src_simple_get, clk);
200 kfree(clk_hw->hw.init->parent_names); 199 kfree(clk_hw->hw.init->parent_names);
201 kfree(clk_hw->hw.init); 200 kfree(clk_hw->hw.init);
@@ -265,14 +264,12 @@ static void _register_dpll_x2(struct device_node *node,
265#endif 264#endif
266 265
267 /* register the clock */ 266 /* register the clock */
268 clk = ti_clk_register(NULL, &clk_hw->hw, name); 267 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
269 268
270 if (IS_ERR(clk)) { 269 if (IS_ERR(clk))
271 kfree(clk_hw); 270 kfree(clk_hw);
272 } else { 271 else
273 omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
274 of_clk_add_provider(node, of_clk_src_simple_get, clk); 272 of_clk_add_provider(node, of_clk_src_simple_get, clk);
275 }
276} 273}
277#endif 274#endif
278 275
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 44b6b6403753..3dde6c8c3354 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -731,7 +731,7 @@ static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
731 do { 731 do {
732 do { 732 do {
733 hw = clk_hw_get_parent(hw); 733 hw = clk_hw_get_parent(hw);
734 } while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC)); 734 } while (hw && (!omap2_clk_is_hw_omap(hw)));
735 if (!hw) 735 if (!hw)
736 break; 736 break;
737 pclk = to_clk_hw_omap(hw); 737 pclk = to_clk_hw_omap(hw);
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 1c78fff5513c..504c0e91cdc7 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -123,7 +123,7 @@ static struct clk *_register_gate(struct device *dev, const char *name,
123 123
124 init.flags = flags; 124 init.flags = flags;
125 125
126 clk = ti_clk_register(NULL, &clk_hw->hw, name); 126 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
127 127
128 if (IS_ERR(clk)) 128 if (IS_ERR(clk))
129 kfree(clk_hw); 129 kfree(clk_hw);
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 87e00c2ee957..83e34429d3b1 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -57,12 +57,10 @@ static struct clk *_register_interface(struct device *dev, const char *name,
57 init.num_parents = 1; 57 init.num_parents = 1;
58 init.parent_names = &parent_name; 58 init.parent_names = &parent_name;
59 59
60 clk = ti_clk_register(NULL, &clk_hw->hw, name); 60 clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
61 61
62 if (IS_ERR(clk)) 62 if (IS_ERR(clk))
63 kfree(clk_hw); 63 kfree(clk_hw);
64 else
65 omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
66 64
67 return clk; 65 return clk;
68} 66}
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 883bdde94d04..b7f9a4f068bf 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -143,7 +143,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
143 143
144 init.name = name; 144 init.name = name;
145 init.ops = &ti_clk_mux_ops; 145 init.ops = &ti_clk_mux_ops;
146 init.flags = flags | CLK_IS_BASIC; 146 init.flags = flags;
147 init.parent_names = parent_names; 147 init.parent_names = parent_names;
148 init.num_parents = num_parents; 148 init.num_parents = num_parents;
149 149
diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
index ec11f55594ad..5d2d42b7e182 100644
--- a/drivers/clk/uniphier/clk-uniphier-cpugear.c
+++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
@@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
47 return ret; 47 return ret;
48 48
49 ret = regmap_write_bits(gear->regmap, 49 ret = regmap_write_bits(gear->regmap,
50 gear->regbase + UNIPHIER_CLK_CPUGEAR_SET, 50 gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
51 UNIPHIER_CLK_CPUGEAR_UPD_BIT, 51 UNIPHIER_CLK_CPUGEAR_UPD_BIT,
52 UNIPHIER_CLK_CPUGEAR_UPD_BIT); 52 UNIPHIER_CLK_CPUGEAR_UPD_BIT);
53 if (ret) 53 if (ret)
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
index 3a0996f2d556..25d4b97aff9b 100644
--- a/drivers/clk/x86/clk-st.c
+++ b/drivers/clk/x86/clk-st.c
@@ -52,7 +52,8 @@ static int st_clk_probe(struct platform_device *pdev)
52 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, 52 0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
53 CLK_GATE_SET_TO_DISABLE, NULL); 53 CLK_GATE_SET_TO_DISABLE, NULL);
54 54
55 clk_hw_register_clkdev(hws[ST_CLK_GATE], "oscout1", NULL); 55 devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE], "oscout1",
56 NULL);
56 57
57 return 0; 58 return 0;
58} 59}
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 595124074821..c364027638e1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
154 if (IS_ERR(parent)) 154 if (IS_ERR(parent))
155 return -ENODEV; 155 return -ENODEV;
156 156
157 /* Bail out if both clocks point to fck */
158 if (clk_is_match(parent, timer->fclk))
159 return 0;
160
157 ret = clk_set_parent(timer->fclk, parent); 161 ret = clk_set_parent(timer->fclk, parent);
158 if (ret < 0) 162 if (ret < 0)
159 pr_err("%s: failed to set parent\n", __func__); 163 pr_err("%s: failed to set parent\n", __func__);
@@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
864 timer->pdev = pdev; 868 timer->pdev = pdev;
865 869
866 pm_runtime_enable(dev); 870 pm_runtime_enable(dev);
867 pm_runtime_irq_safe(dev);
868 871
869 if (!timer->reserved) { 872 if (!timer->reserved) {
870 ret = pm_runtime_get_sync(dev); 873 ret = pm_runtime_get_sync(dev);
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 242c3370544e..9ed46d188cb5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
187 187
188 cpufreq_cooling_unregister(priv->cdev); 188 cpufreq_cooling_unregister(priv->cdev);
189 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 189 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
190 kfree(priv);
191 dev_pm_opp_remove_all_dynamic(priv->cpu_dev); 190 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
191 kfree(priv);
192 192
193 return 0; 193 return 0;
194} 194}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8ada308d72ee..b0125ad65825 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
380 rc = cc_ivgen_init(new_drvdata); 380 rc = cc_ivgen_init(new_drvdata);
381 if (rc) { 381 if (rc) {
382 dev_err(dev, "cc_ivgen_init failed\n"); 382 dev_err(dev, "cc_ivgen_init failed\n");
383 goto post_power_mgr_err; 383 goto post_buf_mgr_err;
384 } 384 }
385 385
386 /* Allocate crypto algs */ 386 /* Allocate crypto algs */
@@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
403 goto post_hash_err; 403 goto post_hash_err;
404 } 404 }
405 405
406 /* All set, we can allow autosuspend */
407 cc_pm_go(new_drvdata);
408
406 /* If we got here and FIPS mode is enabled 409 /* If we got here and FIPS mode is enabled
 407 * it means all FIPS tests passed, so let TEE 410 * it means all FIPS tests passed, so let TEE
408 * know we're good. 411 * know we're good.
@@ -417,8 +420,6 @@ post_cipher_err:
417 cc_cipher_free(new_drvdata); 420 cc_cipher_free(new_drvdata);
418post_ivgen_err: 421post_ivgen_err:
419 cc_ivgen_fini(new_drvdata); 422 cc_ivgen_fini(new_drvdata);
420post_power_mgr_err:
421 cc_pm_fini(new_drvdata);
422post_buf_mgr_err: 423post_buf_mgr_err:
423 cc_buffer_mgr_fini(new_drvdata); 424 cc_buffer_mgr_fini(new_drvdata);
424post_req_mgr_err: 425post_req_mgr_err:
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d990f472e89f..6ff7e75ad90e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev)
100 100
101int cc_pm_init(struct cc_drvdata *drvdata) 101int cc_pm_init(struct cc_drvdata *drvdata)
102{ 102{
103 int rc = 0;
104 struct device *dev = drvdata_to_dev(drvdata); 103 struct device *dev = drvdata_to_dev(drvdata);
105 104
 106 /* must be before the enabling to avoid redundant suspending */ 105 /* must be before the enabling to avoid redundant suspending */
107 pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); 106 pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
108 pm_runtime_use_autosuspend(dev); 107 pm_runtime_use_autosuspend(dev);
109 /* activate the PM module */ 108 /* activate the PM module */
110 rc = pm_runtime_set_active(dev); 109 return pm_runtime_set_active(dev);
111 if (rc) 110}
112 return rc;
113 /* enable the PM module*/
114 pm_runtime_enable(dev);
115 111
116 return rc; 112/* enable the PM module*/
113void cc_pm_go(struct cc_drvdata *drvdata)
114{
115 pm_runtime_enable(drvdata_to_dev(drvdata));
117} 116}
118 117
119void cc_pm_fini(struct cc_drvdata *drvdata) 118void cc_pm_fini(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 020a5403c58b..f62624357020 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -16,6 +16,7 @@
16extern const struct dev_pm_ops ccree_pm; 16extern const struct dev_pm_ops ccree_pm;
17 17
18int cc_pm_init(struct cc_drvdata *drvdata); 18int cc_pm_init(struct cc_drvdata *drvdata);
19void cc_pm_go(struct cc_drvdata *drvdata);
19void cc_pm_fini(struct cc_drvdata *drvdata); 20void cc_pm_fini(struct cc_drvdata *drvdata);
20int cc_pm_suspend(struct device *dev); 21int cc_pm_suspend(struct device *dev);
21int cc_pm_resume(struct device *dev); 22int cc_pm_resume(struct device *dev);
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
29 return 0; 30 return 0;
30} 31}
31 32
33static void cc_pm_go(struct cc_drvdata *drvdata) {}
34
32static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} 35static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
33 36
34static inline int cc_pm_suspend(struct device *dev) 37static inline int cc_pm_suspend(struct device *dev)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4c46ff6f2242..55b77c576c42 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
592 592
593 early_memunmap(tbl, sizeof(*tbl)); 593 early_memunmap(tbl, sizeof(*tbl));
594 } 594 }
595 return 0;
596}
597 595
598int __init efi_apply_persistent_mem_reservations(void)
599{
600 if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { 596 if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
601 unsigned long prsv = efi.mem_reserve; 597 unsigned long prsv = efi.mem_reserve;
602 598
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index eee42d5e25ee..c037c6c5d0b7 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
75 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; 75 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
76 efi_status_t status; 76 efi_status_t status;
77 77
78 if (IS_ENABLED(CONFIG_ARM))
79 return;
80
81 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), 78 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
82 (void **)&rsv); 79 (void **)&rsv);
83 if (status != EFI_SUCCESS) { 80 if (status != EFI_SUCCESS) {
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 8903b9ccfc2b..e2abfdb5cee6 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
147static DEFINE_SEMAPHORE(efi_runtime_lock); 147static DEFINE_SEMAPHORE(efi_runtime_lock);
148 148
149/* 149/*
150 * Expose the EFI runtime lock to the UV platform
151 */
152#ifdef CONFIG_X86_UV
153extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
154#endif
155
156/*
150 * Calls the appropriate efi_runtime_service() with the appropriate 157 * Calls the appropriate efi_runtime_service() with the appropriate
151 * arguments. 158 * arguments.
152 * 159 *
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 00e954f22bc9..74401e0adb29 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -30,6 +30,7 @@
30#define GPIO_REG_EDGE 0xA0 30#define GPIO_REG_EDGE 0xA0
31 31
32struct mtk_gc { 32struct mtk_gc {
33 struct irq_chip irq_chip;
33 struct gpio_chip chip; 34 struct gpio_chip chip;
34 spinlock_t lock; 35 spinlock_t lock;
35 int bank; 36 int bank;
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
189 return 0; 190 return 0;
190} 191}
191 192
192static struct irq_chip mediatek_gpio_irq_chip = {
193 .irq_unmask = mediatek_gpio_irq_unmask,
194 .irq_mask = mediatek_gpio_irq_mask,
195 .irq_mask_ack = mediatek_gpio_irq_mask,
196 .irq_set_type = mediatek_gpio_irq_type,
197};
198
199static int 193static int
200mediatek_gpio_xlate(struct gpio_chip *chip, 194mediatek_gpio_xlate(struct gpio_chip *chip,
201 const struct of_phandle_args *spec, u32 *flags) 195 const struct of_phandle_args *spec, u32 *flags)
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
254 return ret; 248 return ret;
255 } 249 }
256 250
251 rg->irq_chip.name = dev_name(dev);
252 rg->irq_chip.parent_device = dev;
253 rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
254 rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
255 rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
256 rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
257
257 if (mtk->gpio_irq) { 258 if (mtk->gpio_irq) {
258 /* 259 /*
259 * Manually request the irq here instead of passing 260 * Manually request the irq here instead of passing
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
270 return ret; 271 return ret;
271 } 272 }
272 273
273 ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip, 274 ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
274 0, handle_simple_irq, IRQ_TYPE_NONE); 275 0, handle_simple_irq, IRQ_TYPE_NONE);
275 if (ret) { 276 if (ret) {
276 dev_err(dev, "failed to add gpiochip_irqchip\n"); 277 dev_err(dev, "failed to add gpiochip_irqchip\n");
277 return ret; 278 return ret;
278 } 279 }
279 280
280 gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip, 281 gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
281 mtk->gpio_irq, NULL); 282 mtk->gpio_irq, NULL);
282 } 283 }
283 284
@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
310 mtk->gpio_irq = irq_of_parse_and_map(np, 0); 311 mtk->gpio_irq = irq_of_parse_and_map(np, 0);
311 mtk->dev = dev; 312 mtk->dev = dev;
312 platform_set_drvdata(pdev, mtk); 313 platform_set_drvdata(pdev, mtk);
313 mediatek_gpio_irq_chip.name = dev_name(dev);
314 314
315 for (i = 0; i < MTK_BANK_CNT; i++) { 315 for (i = 0; i < MTK_BANK_CNT; i++) {
316 ret = mediatek_gpio_bank_probe(dev, np, i); 316 ret = mediatek_gpio_bank_probe(dev, np, i);
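The mt7621 change above moves the irq_chip from a shared file-scope static into the per-bank state, so each bank carries its own name and parent device. A minimal sketch of that embedding; the struct and helper names are hypothetical.

#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>

struct example_bank {
	struct irq_chip irq_chip;	/* one chip per bank, not shared */
	struct gpio_chip chip;
};

static int example_bank_add_irqchip(struct device *dev,
				    struct example_bank *bank,
				    void (*mask)(struct irq_data *d),
				    void (*unmask)(struct irq_data *d))
{
	bank->irq_chip.name = dev_name(dev);
	bank->irq_chip.parent_device = dev;
	bank->irq_chip.irq_mask = mask;
	bank->irq_chip.irq_mask_ack = mask;
	bank->irq_chip.irq_unmask = unmask;

	return gpiochip_irqchip_add(&bank->chip, &bank->irq_chip, 0,
				    handle_simple_irq, IRQ_TYPE_NONE);
}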
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index e9600b556f39..bcc6be4a5cb2 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
245{ 245{
246 switch (gpio_type) { 246 switch (gpio_type) {
247 case PXA3XX_GPIO: 247 case PXA3XX_GPIO:
248 case MMP2_GPIO:
248 return false; 249 return false;
249 250
250 default: 251 default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bc62bf41b7e9..5dc349173e4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
212 } 212 }
213 213
214 if (amdgpu_device_is_px(dev)) { 214 if (amdgpu_device_is_px(dev)) {
215 dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
215 pm_runtime_use_autosuspend(dev->dev); 216 pm_runtime_use_autosuspend(dev->dev);
216 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 217 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
217 pm_runtime_set_active(dev->dev); 218 pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8fab0d637ee5..3a9b48b227ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle)
90 adev->psp.sos_fw = NULL; 90 adev->psp.sos_fw = NULL;
91 release_firmware(adev->psp.asd_fw); 91 release_firmware(adev->psp.asd_fw);
92 adev->psp.asd_fw = NULL; 92 adev->psp.asd_fw = NULL;
93 release_firmware(adev->psp.ta_fw); 93 if (adev->psp.ta_fw) {
94 adev->psp.ta_fw = NULL; 94 release_firmware(adev->psp.ta_fw);
95 adev->psp.ta_fw = NULL;
96 }
95 return 0; 97 return 0;
96} 98}
97 99
@@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
435 struct ta_xgmi_shared_memory *xgmi_cmd; 437 struct ta_xgmi_shared_memory *xgmi_cmd;
436 int ret; 438 int ret;
437 439
440 if (!psp->adev->psp.ta_fw)
441 return -ENOENT;
442
438 if (!psp->xgmi_context.initialized) { 443 if (!psp->xgmi_context.initialized) {
439 ret = psp_xgmi_init_shared_buf(psp); 444 ret = psp_xgmi_init_shared_buf(psp);
440 if (ret) 445 if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7c108e687683..698bcb8ce61d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -638,12 +638,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
638 struct ttm_bo_global *glob = adev->mman.bdev.glob; 638 struct ttm_bo_global *glob = adev->mman.bdev.glob;
639 struct amdgpu_vm_bo_base *bo_base; 639 struct amdgpu_vm_bo_base *bo_base;
640 640
641#if 0
641 if (vm->bulk_moveable) { 642 if (vm->bulk_moveable) {
642 spin_lock(&glob->lru_lock); 643 spin_lock(&glob->lru_lock);
643 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); 644 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
644 spin_unlock(&glob->lru_lock); 645 spin_unlock(&glob->lru_lock);
645 return; 646 return;
646 } 647 }
648#endif
647 649
648 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); 650 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
649 651
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0c6e7f9b143f..189fcb004579 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
152 152
153 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 153 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
154 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); 154 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
155 if (err) 155 if (err) {
156 goto out2; 156 release_firmware(adev->psp.ta_fw);
157 157 adev->psp.ta_fw = NULL;
158 err = amdgpu_ucode_validate(adev->psp.ta_fw); 158 dev_info(adev->dev,
159 if (err) 159 "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
160 goto out2; 160 } else {
161 161 err = amdgpu_ucode_validate(adev->psp.ta_fw);
162 ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; 162 if (err)
163 adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); 163 goto out2;
164 adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); 164
165 adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + 165 ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
166 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 166 adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
167 adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
168 adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
169 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
170 }
167 171
168 return 0; 172 return 0;
169 173
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 6811a5d05b27..aa2f71cc1eba 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -128,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
128 128
129static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = 129static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
130{ 130{
131 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), 131 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
132 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), 132 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
133 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), 133 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
134 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), 134 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -158,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
158}; 158};
159 159
160static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { 160static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
161 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), 161 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
162 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), 162 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
163 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), 163 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
164 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), 164 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0b392bfca284..5296b8f3e0ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -786,12 +786,13 @@ static int dm_suspend(void *handle)
786 struct amdgpu_display_manager *dm = &adev->dm; 786 struct amdgpu_display_manager *dm = &adev->dm;
787 int ret = 0; 787 int ret = 0;
788 788
789 WARN_ON(adev->dm.cached_state);
790 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
791
789 s3_handle_mst(adev->ddev, true); 792 s3_handle_mst(adev->ddev, true);
790 793
791 amdgpu_dm_irq_suspend(adev); 794 amdgpu_dm_irq_suspend(adev);
792 795
793 WARN_ON(adev->dm.cached_state);
794 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
795 796
796 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 797 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
797 798
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9a7ac58eb18e..ddd75a4d8ba5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
671 return bytes_from_user; 671 return bytes_from_user;
672} 672}
673 673
674/*
675 * Returns the min and max vrr vfreq through the connector's debugfs file.
676 * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
677 */
678static int vrr_range_show(struct seq_file *m, void *data)
679{
680 struct drm_connector *connector = m->private;
681 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
682
683 if (connector->status != connector_status_connected)
684 return -ENODEV;
685
686 seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq);
687 seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq);
688
689 return 0;
690}
691DEFINE_SHOW_ATTRIBUTE(vrr_range);
692
674static const struct file_operations dp_link_settings_debugfs_fops = { 693static const struct file_operations dp_link_settings_debugfs_fops = {
675 .owner = THIS_MODULE, 694 .owner = THIS_MODULE,
676 .read = dp_link_settings_read, 695 .read = dp_link_settings_read,
@@ -697,7 +716,8 @@ static const struct {
697} dp_debugfs_entries[] = { 716} dp_debugfs_entries[] = {
698 {"link_settings", &dp_link_settings_debugfs_fops}, 717 {"link_settings", &dp_link_settings_debugfs_fops},
699 {"phy_settings", &dp_phy_settings_debugfs_fop}, 718 {"phy_settings", &dp_phy_settings_debugfs_fop},
700 {"test_pattern", &dp_phy_test_pattern_fops} 719 {"test_pattern", &dp_phy_test_pattern_fops},
720 {"vrr_range", &vrr_range_fops}
701}; 721};
702 722
703int connector_debugfs_init(struct amdgpu_dm_connector *connector) 723int connector_debugfs_init(struct amdgpu_dm_connector *connector)
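
The vrr_range entry added above follows the standard seq_file pattern: one *_show() callback plus DEFINE_SHOW_ATTRIBUTE() to generate the file_operations, then registration under the connector's debugfs directory. A minimal self-contained sketch of that pattern follows; the foo_* names, the values printed, and the debugfs path are illustrative only, not part of the patch.

/* Hypothetical module demonstrating DEFINE_SHOW_ATTRIBUTE + debugfs. */
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *foo_dir;

static int foo_range_show(struct seq_file *m, void *data)
{
        /* In the patch, m->private would be the connector passed at creation. */
        seq_printf(m, "Min: %u\n", 40U);
        seq_printf(m, "Max: %u\n", 144U);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_range);

static int __init foo_init(void)
{
        foo_dir = debugfs_create_dir("foo", NULL);
        debugfs_create_file("range", 0444, foo_dir, NULL, &foo_range_fops);
        return 0;
}

static void __exit foo_exit(void)
{
        debugfs_remove_recursive(foo_dir);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
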
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 19801bdba0d2..7a72ee46f14b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -662,6 +662,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
662{ 662{
663 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); 663 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
664 struct dm_pp_power_level_change_request level_change_req; 664 struct dm_pp_power_level_change_request level_change_req;
665 int patched_disp_clk = context->bw.dce.dispclk_khz;
666
667 /*TODO: W/A for dal3 linux, investigate why this works */
668 if (!clk_mgr_dce->dfs_bypass_active)
669 patched_disp_clk = patched_disp_clk * 115 / 100;
665 670
666 level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); 671 level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
667 /* get max clock state from PPLIB */ 672 /* get max clock state from PPLIB */
@@ -671,9 +676,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
671 clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; 676 clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
672 } 677 }
673 678
674 if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { 679 if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
675 context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); 680 context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
676 clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; 681 clk_mgr->clks.dispclk_khz = patched_disp_clk;
677 } 682 }
678 dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); 683 dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
679} 684}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index acd418515346..a6b80fdaa666 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -37,6 +37,10 @@ void dce100_prepare_bandwidth(
37 struct dc *dc, 37 struct dc *dc,
38 struct dc_state *context); 38 struct dc_state *context);
39 39
40void dce100_optimize_bandwidth(
41 struct dc *dc,
42 struct dc_state *context);
43
40bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, 44bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
41 struct dc_bios *dcb, 45 struct dc_bios *dcb,
42 enum pipe_gating_control power_gating); 46 enum pipe_gating_control power_gating);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index a60a90e68d91..c4543178ba20 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc)
77 dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; 77 dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
78 dc->hwss.pipe_control_lock = dce_pipe_control_lock; 78 dc->hwss.pipe_control_lock = dce_pipe_control_lock;
79 dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; 79 dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
80 dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; 80 dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
81} 81}
82 82
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index cdd1d6b7b9f2..4e9ea50141bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -790,9 +790,22 @@ bool dce80_validate_bandwidth(
790 struct dc *dc, 790 struct dc *dc,
791 struct dc_state *context) 791 struct dc_state *context)
792{ 792{
793 /* TODO implement when needed but for now hardcode max value*/ 793 int i;
794 context->bw.dce.dispclk_khz = 681000; 794 bool at_least_one_pipe = false;
795 context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; 795
796 for (i = 0; i < dc->res_pool->pipe_count; i++) {
797 if (context->res_ctx.pipe_ctx[i].stream)
798 at_least_one_pipe = true;
799 }
800
801 if (at_least_one_pipe) {
802 /* TODO implement when needed but for now hardcode max value*/
803 context->bw.dce.dispclk_khz = 681000;
804 context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
805 } else {
806 context->bw.dce.dispclk_khz = 0;
807 context->bw.dce.yclk_khz = 0;
808 }
796 809
797 return true; 810 return true;
798} 811}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 58a12ddf12f3..41883c981789 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2658,8 +2658,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2658 .mirror = pipe_ctx->plane_state->horizontal_mirror 2658 .mirror = pipe_ctx->plane_state->horizontal_mirror
2659 }; 2659 };
2660 2660
2661 pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x; 2661 pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
2662 pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y; 2662 pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
2663 2663
2664 if (pipe_ctx->plane_state->address.type 2664 if (pipe_ctx->plane_state->address.type
2665 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) 2665 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 99cba8ea5d82..5df1256618cc 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
528 528
529 object_count = cl->object_count; 529 object_count = cl->object_count;
530 530
531 object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32)); 531 object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
532 array_size(object_count, sizeof(__u32)));
532 if (IS_ERR(object_ids)) 533 if (IS_ERR(object_ids))
533 return PTR_ERR(object_ids); 534 return PTR_ERR(object_ids);
534 535
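
The drm_lease change replaces an open-coded object_count * sizeof(__u32) with array_size(), so a user-controlled count cannot overflow the allocation size. A hedged sketch of the same idiom, using a hypothetical helper rather than the ioctl itself:

#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Hypothetical helper: duplicate a user-supplied array of u32 IDs.
 * array_size() saturates to SIZE_MAX on multiplication overflow, so
 * memdup_user() then fails cleanly instead of copying into a buffer
 * that is smaller than the caller believes.
 */
static u32 *copy_ids_from_user(const void __user *uptr, u32 count)
{
        return memdup_user(uptr, array_size(count, sizeof(u32)));
}
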
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 216f52b744a6..c882ea94172c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1824 return 0; 1824 return 0;
1825} 1825}
1826 1826
1827static inline bool
1828__vma_matches(struct vm_area_struct *vma, struct file *filp,
1829 unsigned long addr, unsigned long size)
1830{
1831 if (vma->vm_file != filp)
1832 return false;
1833
1834 return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
1835}
1836
1827/** 1837/**
1828 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address 1838 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1829 * it is mapped to. 1839 * it is mapped to.
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1882 return -EINTR; 1892 return -EINTR;
1883 } 1893 }
1884 vma = find_vma(mm, addr); 1894 vma = find_vma(mm, addr);
1885 if (vma) 1895 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
1886 vma->vm_page_prot = 1896 vma->vm_page_prot =
1887 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 1897 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1888 else 1898 else
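
__vma_matches() is needed because find_vma() only returns the first VMA whose end lies above the given address; it is not guaranteed to be the mapping vm_mmap() just created, since another thread can unmap and replace it before the lookup runs. A small sketch of the kind of validation involved (names are illustrative, not the driver's API):

#include <linux/mm.h>

/*
 * Illustrative check: only trust a VMA returned by find_vma() if it is
 * exactly the mapping we expect (same backing file, same bounds).
 * The caller is assumed to hold the mm's mmap semaphore.
 */
static bool mapping_still_ours(struct mm_struct *mm, struct file *filp,
                               unsigned long addr, unsigned long size)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        return vma && vma->vm_file == filp &&
               vma->vm_start == addr && vma->vm_end - vma->vm_start == size;
}
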
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d6c8f8fdfda5..017fc602a10e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event)
594 * Update the bitmask of enabled events and increment 594 * Update the bitmask of enabled events and increment
595 * the event reference counter. 595 * the event reference counter.
596 */ 596 */
597 GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); 597 BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
598 GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
598 GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); 599 GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
599 i915->pmu.enable |= BIT_ULL(bit); 600 i915->pmu.enable |= BIT_ULL(bit);
600 i915->pmu.enable_count[bit]++; 601 i915->pmu.enable_count[bit]++;
@@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event)
615 engine = intel_engine_lookup_user(i915, 616 engine = intel_engine_lookup_user(i915,
616 engine_event_class(event), 617 engine_event_class(event),
617 engine_event_instance(event)); 618 engine_event_instance(event));
618 GEM_BUG_ON(!engine);
619 engine->pmu.enable |= BIT(sample);
620 619
621 GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); 620 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
621 I915_ENGINE_SAMPLE_COUNT);
622 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
623 I915_ENGINE_SAMPLE_COUNT);
624 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
625 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
622 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); 626 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
627
628 engine->pmu.enable |= BIT(sample);
623 engine->pmu.enable_count[sample]++; 629 engine->pmu.enable_count[sample]++;
624 } 630 }
625 631
@@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event)
649 engine = intel_engine_lookup_user(i915, 655 engine = intel_engine_lookup_user(i915,
650 engine_event_class(event), 656 engine_event_class(event),
651 engine_event_instance(event)); 657 engine_event_instance(event));
652 GEM_BUG_ON(!engine); 658
653 GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); 659 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
660 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
654 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); 661 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
662
655 /* 663 /*
656 * Decrement the reference count and clear the enabled 664 * Decrement the reference count and clear the enabled
657 * bitmask when the last listener on an event goes away. 665 * bitmask when the last listener on an event goes away.
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event)
660 engine->pmu.enable &= ~BIT(sample); 668 engine->pmu.enable &= ~BIT(sample);
661 } 669 }
662 670
663 GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); 671 GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
664 GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); 672 GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
665 /* 673 /*
666 * Decrement the reference count and clear the enabled 674 * Decrement the reference count and clear the enabled
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 7f164ca3db12..b3728c5f13e7 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -31,6 +31,8 @@ enum {
31 ((1 << I915_PMU_SAMPLE_BITS) + \ 31 ((1 << I915_PMU_SAMPLE_BITS) + \
32 (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) 32 (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
33 33
34#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
35
34struct i915_pmu_sample { 36struct i915_pmu_sample {
35 u64 cur; 37 u64 cur;
36}; 38};
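
The i915_pmu fix sizes the per-engine arrays by I915_ENGINE_SAMPLE_COUNT and then pins that assumption down with BUILD_BUG_ON()/ARRAY_SIZE(), so a future mismatch fails at compile time instead of overrunning adjacent fields. A generic sketch of the compile-time bounds-checking idiom, with a made-up enum and struct:

#include <linux/build_bug.h>
#include <linux/bug.h>
#include <linux/kernel.h>

enum demo_sample {
        DEMO_SAMPLE_BUSY,
        DEMO_SAMPLE_WAIT,
        DEMO_SAMPLE_SEMA,
        DEMO_SAMPLE_COUNT,
};

struct demo_pmu {
        unsigned int enable_count[DEMO_SAMPLE_COUNT];
};

static void demo_enable(struct demo_pmu *pmu, unsigned int sample)
{
        /* Fails the build if the array ever stops matching the enum. */
        BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != DEMO_SAMPLE_COUNT);

        /* Runtime guard against an out-of-range sample index. */
        if (WARN_ON(sample >= ARRAY_SIZE(pmu->enable_count)))
                return;

        pmu->enable_count[sample]++;
}
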
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0a7d60509ca7..067054cf4a86 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1790,7 +1790,7 @@ enum i915_power_well_id {
1790#define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 1790#define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40
1791#define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 1791#define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40
1792#define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 1792#define _CNL_PORT_TX_F_LN0_OFFSET 0x162840
1793#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ 1793#define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \
1794 _CNL_PORT_TX_AE_GRP_OFFSET, \ 1794 _CNL_PORT_TX_AE_GRP_OFFSET, \
1795 _CNL_PORT_TX_B_GRP_OFFSET, \ 1795 _CNL_PORT_TX_B_GRP_OFFSET, \
1796 _CNL_PORT_TX_B_GRP_OFFSET, \ 1796 _CNL_PORT_TX_B_GRP_OFFSET, \
@@ -1798,7 +1798,7 @@ enum i915_power_well_id {
1798 _CNL_PORT_TX_AE_GRP_OFFSET, \ 1798 _CNL_PORT_TX_AE_GRP_OFFSET, \
1799 _CNL_PORT_TX_F_GRP_OFFSET) + \ 1799 _CNL_PORT_TX_F_GRP_OFFSET) + \
1800 4 * (dw)) 1800 4 * (dw))
1801#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ 1801#define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \
1802 _CNL_PORT_TX_AE_LN0_OFFSET, \ 1802 _CNL_PORT_TX_AE_LN0_OFFSET, \
1803 _CNL_PORT_TX_B_LN0_OFFSET, \ 1803 _CNL_PORT_TX_B_LN0_OFFSET, \
1804 _CNL_PORT_TX_B_LN0_OFFSET, \ 1804 _CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1834,9 +1834,9 @@ enum i915_power_well_id {
1834 1834
1835#define _CNL_PORT_TX_DW4_LN0_AE 0x162450 1835#define _CNL_PORT_TX_DW4_LN0_AE 0x162450
1836#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 1836#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
1837#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) 1837#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
1838#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) 1838#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
1839#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ 1839#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
1840 ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ 1840 ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
1841 _CNL_PORT_TX_DW4_LN0_AE))) 1841 _CNL_PORT_TX_DW4_LN0_AE)))
1842#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) 1842#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
@@ -1864,8 +1864,12 @@ enum i915_power_well_id {
1864#define RTERM_SELECT(x) ((x) << 3) 1864#define RTERM_SELECT(x) ((x) << 3)
1865#define RTERM_SELECT_MASK (0x7 << 3) 1865#define RTERM_SELECT_MASK (0x7 << 3)
1866 1866
1867#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) 1867#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
1868#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) 1868#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
1869#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
1870#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
1871#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
1872#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
1869#define N_SCALAR(x) ((x) << 24) 1873#define N_SCALAR(x) ((x) << 24)
1870#define N_SCALAR_MASK (0x7F << 24) 1874#define N_SCALAR_MASK (0x7F << 24)
1871 1875
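
Swapping _CNL_PORT_TX_DW_GRP()/_CNL_PORT_TX_DW_LN0() to take (dw, port) matches the argument order of the ICL macros next to them, but because both parameters are plain integers the compiler cannot flag a caller that still passes (port, dw), so every call site has to be flipped in the same patch. A tiny illustration of the hazard, with made-up offsets:

/* Hypothetical register map: one base per port, one dword every 4 bytes. */
static const unsigned int port_base[] = { 0x162000, 0x162100, 0x162200 };

#define DEMO_TX_REG(dw, port)        (port_base[(port)] + 4 * (dw))

static unsigned int demo_reg(unsigned int dw, unsigned int port)
{
        /*
         * DEMO_TX_REG(4, 1) == 0x162110, the intended register.
         * A stale caller writing DEMO_TX_REG(1, 4) compiles cleanly but
         * indexes port_base[4], silently reading past the array.
         */
        return DEMO_TX_REG(dw, port);
}
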
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4079050f9d6c..7edce1b7b348 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
494 { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ 494 { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
495}; 495};
496 496
497struct icl_combo_phy_ddi_buf_trans { 497/* icl_combo_phy_ddi_translations */
498 u32 dw2_swing_select; 498static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
499 u32 dw2_swing_scalar; 499 /* NT mV Trans mV db */
500 u32 dw4_scaling; 500 { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
501}; 501 { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
502 502 { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
503/* Voltage Swing Programming for VccIO 0.85V for DP */ 503 { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
504static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { 504 { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
505 /* Voltage mV db */ 505 { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
506 { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ 506 { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
507 { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ 507 { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
508 { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ 508 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
509 { 0x2, 0x98, 0x900F }, /* 400 9.5 */ 509 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
510 { 0xB, 0x70, 0x0018 }, /* 600 0.0 */
511 { 0xB, 0x70, 0x3015 }, /* 600 3.5 */
512 { 0xB, 0x70, 0x6012 }, /* 600 6.0 */
513 { 0x5, 0x00, 0x0018 }, /* 800 0.0 */
514 { 0x5, 0x00, 0x3015 }, /* 800 3.5 */
515 { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
516};
517
518/* FIXME - After table is updated in Bspec */
519/* Voltage Swing Programming for VccIO 0.85V for eDP */
520static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
521 /* Voltage mV db */
522 { 0x0, 0x00, 0x00 }, /* 200 0.0 */
523 { 0x0, 0x00, 0x00 }, /* 200 1.5 */
524 { 0x0, 0x00, 0x00 }, /* 200 4.0 */
525 { 0x0, 0x00, 0x00 }, /* 200 6.0 */
526 { 0x0, 0x00, 0x00 }, /* 250 0.0 */
527 { 0x0, 0x00, 0x00 }, /* 250 1.5 */
528 { 0x0, 0x00, 0x00 }, /* 250 4.0 */
529 { 0x0, 0x00, 0x00 }, /* 300 0.0 */
530 { 0x0, 0x00, 0x00 }, /* 300 1.5 */
531 { 0x0, 0x00, 0x00 }, /* 350 0.0 */
532};
533
534/* Voltage Swing Programming for VccIO 0.95V for DP */
535static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
536 /* Voltage mV db */
537 { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
538 { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
539 { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
540 { 0x2, 0x98, 0x900F }, /* 400 9.5 */
541 { 0x4, 0x98, 0x0018 }, /* 600 0.0 */
542 { 0x4, 0x98, 0x3015 }, /* 600 3.5 */
543 { 0x4, 0x98, 0x6012 }, /* 600 6.0 */
544 { 0x5, 0x76, 0x0018 }, /* 800 0.0 */
545 { 0x5, 0x76, 0x3015 }, /* 800 3.5 */
546 { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
547}; 510};
548 511
549/* FIXME - After table is updated in Bspec */ 512static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
550/* Voltage Swing Programming for VccIO 0.95V for eDP */ 513 /* NT mV Trans mV db */
551static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { 514 { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
552 /* Voltage mV db */ 515 { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
553 { 0x0, 0x00, 0x00 }, /* 200 0.0 */ 516 { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
554 { 0x0, 0x00, 0x00 }, /* 200 1.5 */ 517 { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
555 { 0x0, 0x00, 0x00 }, /* 200 4.0 */ 518 { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
556 { 0x0, 0x00, 0x00 }, /* 200 6.0 */ 519 { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
557 { 0x0, 0x00, 0x00 }, /* 250 0.0 */ 520 { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
558 { 0x0, 0x00, 0x00 }, /* 250 1.5 */ 521 { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
559 { 0x0, 0x00, 0x00 }, /* 250 4.0 */ 522 { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
560 { 0x0, 0x00, 0x00 }, /* 300 0.0 */ 523 { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
561 { 0x0, 0x00, 0x00 }, /* 300 1.5 */
562 { 0x0, 0x00, 0x00 }, /* 350 0.0 */
563}; 524};
564 525
565/* Voltage Swing Programming for VccIO 1.05V for DP */ 526static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
566static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { 527 /* NT mV Trans mV db */
567 /* Voltage mV db */ 528 { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
568 { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ 529 { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
569 { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ 530 { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
570 { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ 531 { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
571 { 0x2, 0x98, 0x900F }, /* 400 9.5 */ 532 { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
572 { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ 533 { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
573 { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ 534 { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
574 { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ 535 { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
575 { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ 536 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
576 { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ 537 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
577 { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
578}; 538};
579 539
580/* FIXME - After table is updated in Bspec */ 540static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
581/* Voltage Swing Programming for VccIO 1.05V for eDP */ 541 /* NT mV Trans mV db */
582static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { 542 { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
583 /* Voltage mV db */ 543 { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
584 { 0x0, 0x00, 0x00 }, /* 200 0.0 */ 544 { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
585 { 0x0, 0x00, 0x00 }, /* 200 1.5 */ 545 { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
586 { 0x0, 0x00, 0x00 }, /* 200 4.0 */ 546 { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
587 { 0x0, 0x00, 0x00 }, /* 200 6.0 */ 547 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
588 { 0x0, 0x00, 0x00 }, /* 250 0.0 */ 548 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
589 { 0x0, 0x00, 0x00 }, /* 250 1.5 */
590 { 0x0, 0x00, 0x00 }, /* 250 4.0 */
591 { 0x0, 0x00, 0x00 }, /* 300 0.0 */
592 { 0x0, 0x00, 0x00 }, /* 300 1.5 */
593 { 0x0, 0x00, 0x00 }, /* 350 0.0 */
594}; 549};
595 550
596struct icl_mg_phy_ddi_buf_trans { 551struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
871 } 826 }
872} 827}
873 828
874static const struct icl_combo_phy_ddi_buf_trans * 829static const struct cnl_ddi_buf_trans *
875icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, 830icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
876 int type, int *n_entries) 831 int type, int rate, int *n_entries)
877{ 832{
878 u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; 833 if (type == INTEL_OUTPUT_HDMI) {
879 834 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
880 if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { 835 return icl_combo_phy_ddi_translations_hdmi;
881 switch (voltage) { 836 } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
882 case VOLTAGE_INFO_0_85V: 837 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
883 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); 838 return icl_combo_phy_ddi_translations_edp_hbr3;
884 return icl_combo_phy_ddi_translations_edp_0_85V; 839 } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
885 case VOLTAGE_INFO_0_95V: 840 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
886 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); 841 return icl_combo_phy_ddi_translations_edp_hbr2;
887 return icl_combo_phy_ddi_translations_edp_0_95V;
888 case VOLTAGE_INFO_1_05V:
889 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
890 return icl_combo_phy_ddi_translations_edp_1_05V;
891 default:
892 MISSING_CASE(voltage);
893 return NULL;
894 }
895 } else {
896 switch (voltage) {
897 case VOLTAGE_INFO_0_85V:
898 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
899 return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
900 case VOLTAGE_INFO_0_95V:
901 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
902 return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
903 case VOLTAGE_INFO_1_05V:
904 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
905 return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
906 default:
907 MISSING_CASE(voltage);
908 return NULL;
909 }
910 } 842 }
843
844 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
845 return icl_combo_phy_ddi_translations_dp_hbr2;
911} 846}
912 847
913static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) 848static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
918 853
919 if (IS_ICELAKE(dev_priv)) { 854 if (IS_ICELAKE(dev_priv)) {
920 if (intel_port_is_combophy(dev_priv, port)) 855 if (intel_port_is_combophy(dev_priv, port))
921 icl_get_combo_buf_trans(dev_priv, port, 856 icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
922 INTEL_OUTPUT_HDMI, &n_entries); 857 0, &n_entries);
923 else 858 else
924 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); 859 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
925 default_entry = n_entries - 1; 860 default_entry = n_entries - 1;
@@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
2275u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) 2210u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
2276{ 2211{
2277 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2212 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2213 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2278 enum port port = encoder->port; 2214 enum port port = encoder->port;
2279 int n_entries; 2215 int n_entries;
2280 2216
2281 if (IS_ICELAKE(dev_priv)) { 2217 if (IS_ICELAKE(dev_priv)) {
2282 if (intel_port_is_combophy(dev_priv, port)) 2218 if (intel_port_is_combophy(dev_priv, port))
2283 icl_get_combo_buf_trans(dev_priv, port, encoder->type, 2219 icl_get_combo_buf_trans(dev_priv, port, encoder->type,
2284 &n_entries); 2220 intel_dp->link_rate, &n_entries);
2285 else 2221 else
2286 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); 2222 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
2287 } else if (IS_CANNONLAKE(dev_priv)) { 2223 } else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
2462} 2398}
2463 2399
2464static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, 2400static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
2465 u32 level, enum port port, int type) 2401 u32 level, enum port port, int type,
2402 int rate)
2466{ 2403{
2467 const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; 2404 const struct cnl_ddi_buf_trans *ddi_translations = NULL;
2468 u32 n_entries, val; 2405 u32 n_entries, val;
2469 int ln; 2406 int ln;
2470 2407
2471 ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, 2408 ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
2472 &n_entries); 2409 rate, &n_entries);
2473 if (!ddi_translations) 2410 if (!ddi_translations)
2474 return; 2411 return;
2475 2412
@@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
2478 level = n_entries - 1; 2415 level = n_entries - 1;
2479 } 2416 }
2480 2417
2481 /* Set PORT_TX_DW5 Rterm Sel to 110b. */ 2418 /* Set PORT_TX_DW5 */
2482 val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); 2419 val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
2483 val &= ~RTERM_SELECT_MASK; 2420 val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
2421 TAP2_DISABLE | TAP3_DISABLE);
2422 val |= SCALING_MODE_SEL(0x2);
2484 val |= RTERM_SELECT(0x6); 2423 val |= RTERM_SELECT(0x6);
2485 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); 2424 val |= TAP3_DISABLE;
2486
2487 /* Program PORT_TX_DW5 */
2488 val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
2489 /* Set DisableTap2 and DisableTap3 if MIPI DSI
2490 * Clear DisableTap2 and DisableTap3 for all other Ports
2491 */
2492 if (type == INTEL_OUTPUT_DSI) {
2493 val |= TAP2_DISABLE;
2494 val |= TAP3_DISABLE;
2495 } else {
2496 val &= ~TAP2_DISABLE;
2497 val &= ~TAP3_DISABLE;
2498 }
2499 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); 2425 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
2500 2426
2501 /* Program PORT_TX_DW2 */ 2427 /* Program PORT_TX_DW2 */
2502 val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); 2428 val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
2503 val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | 2429 val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
2504 RCOMP_SCALAR_MASK); 2430 RCOMP_SCALAR_MASK);
2505 val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); 2431 val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
2506 val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); 2432 val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
2507 /* Program Rcomp scalar for every table entry */ 2433 /* Program Rcomp scalar for every table entry */
2508 val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); 2434 val |= RCOMP_SCALAR(0x98);
2509 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); 2435 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
2510 2436
2511 /* Program PORT_TX_DW4 */ 2437 /* Program PORT_TX_DW4 */
@@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
2514 val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); 2440 val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
2515 val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | 2441 val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
2516 CURSOR_COEFF_MASK); 2442 CURSOR_COEFF_MASK);
2517 val |= ddi_translations[level].dw4_scaling; 2443 val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
2444 val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
2445 val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
2518 I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); 2446 I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
2519 } 2447 }
2448
2449 /* Program PORT_TX_DW7 */
2450 val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
2451 val &= ~N_SCALAR_MASK;
2452 val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
2453 I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
2520} 2454}
2521 2455
2522static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, 2456static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
2581 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); 2515 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
2582 2516
2583 /* 5. Program swing and de-emphasis */ 2517 /* 5. Program swing and de-emphasis */
2584 icl_ddi_combo_vswing_program(dev_priv, level, port, type); 2518 icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
2585 2519
2586 /* 6. Set training enable to trigger update */ 2520 /* 6. Set training enable to trigger update */
2587 val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); 2521 val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
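
After this rework, icl_get_combo_buf_trans() picks one of several static tables by output type and link rate and reports the entry count through an out-parameter, letting callers clamp the requested level to the table size. A stripped-down sketch of that table-selection pattern; the demo tables, fields and the HDMI/DP split are placeholders, not the driver's real data:

#include <linux/kernel.h>

struct demo_buf_trans {
        unsigned int swing;
        unsigned int deemph;
};

static const struct demo_buf_trans demo_trans_dp[]   = { {0, 0}, {1, 2}, {2, 4} };
static const struct demo_buf_trans demo_trans_hdmi[] = { {1, 0}, {2, 1} };

static const struct demo_buf_trans *
demo_get_buf_trans(bool is_hdmi, int *n_entries)
{
        if (is_hdmi) {
                *n_entries = ARRAY_SIZE(demo_trans_hdmi);
                return demo_trans_hdmi;
        }
        *n_entries = ARRAY_SIZE(demo_trans_dp);
        return demo_trans_dp;
}

static unsigned int demo_swing_for_level(bool is_hdmi, int level)
{
        int n;
        const struct demo_buf_trans *t = demo_get_buf_trans(is_hdmi, &n);

        /* Clamp, as the driver does, instead of indexing out of range. */
        if (level >= n)
                level = n - 1;
        return t[level].swing;
}
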
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fdd2cbc56fa3..22a74608c6e4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
304static int icl_max_source_rate(struct intel_dp *intel_dp) 304static int icl_max_source_rate(struct intel_dp *intel_dp)
305{ 305{
306 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 306 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
307 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
307 enum port port = dig_port->base.port; 308 enum port port = dig_port->base.port;
308 309
309 if (port == PORT_B) 310 if (intel_port_is_combophy(dev_priv, port) &&
311 !intel_dp_is_edp(intel_dp))
310 return 540000; 312 return 540000;
311 313
312 return 810000; 314 return 810000;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f94a04b4ad87..e9ddeaf05a14 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,16 @@ struct intel_fbdev {
209 unsigned long vma_flags; 209 unsigned long vma_flags;
210 async_cookie_t cookie; 210 async_cookie_t cookie;
211 int preferred_bpp; 211 int preferred_bpp;
212
213 /* Whether or not fbdev hpd processing is temporarily suspended */
214 bool hpd_suspended : 1;
215 /* Set when a hotplug was received while HPD processing was
216 * suspended
217 */
218 bool hpd_waiting : 1;
219
220 /* Protects hpd_suspended */
221 struct mutex hpd_lock;
212}; 222};
213 223
214struct intel_encoder { 224struct intel_encoder {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb5bb5b32a60..4ee16b264dbe 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
336 bool *enabled, int width, int height) 336 bool *enabled, int width, int height)
337{ 337{
338 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); 338 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
339 unsigned long conn_configured, conn_seq, mask;
340 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); 339 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
340 unsigned long conn_configured, conn_seq;
341 int i, j; 341 int i, j;
342 bool *save_enabled; 342 bool *save_enabled;
343 bool fallback = true, ret = true; 343 bool fallback = true, ret = true;
@@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
355 drm_modeset_backoff(&ctx); 355 drm_modeset_backoff(&ctx);
356 356
357 memcpy(save_enabled, enabled, count); 357 memcpy(save_enabled, enabled, count);
358 mask = GENMASK(count - 1, 0); 358 conn_seq = GENMASK(count - 1, 0);
359 conn_configured = 0; 359 conn_configured = 0;
360retry: 360retry:
361 conn_seq = conn_configured;
362 for (i = 0; i < count; i++) { 361 for (i = 0; i < count; i++) {
363 struct drm_fb_helper_connector *fb_conn; 362 struct drm_fb_helper_connector *fb_conn;
364 struct drm_connector *connector; 363 struct drm_connector *connector;
@@ -371,7 +370,8 @@ retry:
371 if (conn_configured & BIT(i)) 370 if (conn_configured & BIT(i))
372 continue; 371 continue;
373 372
374 if (conn_seq == 0 && !connector->has_tile) 373 /* First pass, only consider tiled connectors */
374 if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
375 continue; 375 continue;
376 376
377 if (connector->status == connector_status_connected) 377 if (connector->status == connector_status_connected)
@@ -475,8 +475,10 @@ retry:
475 conn_configured |= BIT(i); 475 conn_configured |= BIT(i);
476 } 476 }
477 477
478 if ((conn_configured & mask) != mask && conn_configured != conn_seq) 478 if (conn_configured != conn_seq) { /* repeat until no more are found */
479 conn_seq = conn_configured;
479 goto retry; 480 goto retry;
481 }
480 482
481 /* 483 /*
482 * If the BIOS didn't enable everything it could, fall back to have the 484 * If the BIOS didn't enable everything it could, fall back to have the
@@ -679,6 +681,7 @@ int intel_fbdev_init(struct drm_device *dev)
679 if (ifbdev == NULL) 681 if (ifbdev == NULL)
680 return -ENOMEM; 682 return -ENOMEM;
681 683
684 mutex_init(&ifbdev->hpd_lock);
682 drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); 685 drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
683 686
684 if (!intel_fbdev_init_bios(dev, ifbdev)) 687 if (!intel_fbdev_init_bios(dev, ifbdev))
@@ -752,6 +755,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
752 intel_fbdev_destroy(ifbdev); 755 intel_fbdev_destroy(ifbdev);
753} 756}
754 757
758/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
759 * processing, fbdev will perform a full connector reprobe if a hotplug event
760 * was received while HPD was suspended.
761 */
762static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
763{
764 bool send_hpd = false;
765
766 mutex_lock(&ifbdev->hpd_lock);
767 ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
768 send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
769 ifbdev->hpd_waiting = false;
770 mutex_unlock(&ifbdev->hpd_lock);
771
772 if (send_hpd) {
773 DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
774 drm_fb_helper_hotplug_event(&ifbdev->helper);
775 }
776}
777
755void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) 778void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
756{ 779{
757 struct drm_i915_private *dev_priv = to_i915(dev); 780 struct drm_i915_private *dev_priv = to_i915(dev);
@@ -773,6 +796,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
773 */ 796 */
774 if (state != FBINFO_STATE_RUNNING) 797 if (state != FBINFO_STATE_RUNNING)
775 flush_work(&dev_priv->fbdev_suspend_work); 798 flush_work(&dev_priv->fbdev_suspend_work);
799
776 console_lock(); 800 console_lock();
777 } else { 801 } else {
778 /* 802 /*
@@ -800,17 +824,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
800 824
801 drm_fb_helper_set_suspend(&ifbdev->helper, state); 825 drm_fb_helper_set_suspend(&ifbdev->helper, state);
802 console_unlock(); 826 console_unlock();
827
828 intel_fbdev_hpd_set_suspend(ifbdev, state);
803} 829}
804 830
805void intel_fbdev_output_poll_changed(struct drm_device *dev) 831void intel_fbdev_output_poll_changed(struct drm_device *dev)
806{ 832{
807 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; 833 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
834 bool send_hpd;
808 835
809 if (!ifbdev) 836 if (!ifbdev)
810 return; 837 return;
811 838
812 intel_fbdev_sync(ifbdev); 839 intel_fbdev_sync(ifbdev);
813 if (ifbdev->vma || ifbdev->helper.deferred_setup) 840
841 mutex_lock(&ifbdev->hpd_lock);
842 send_hpd = !ifbdev->hpd_suspended;
843 ifbdev->hpd_waiting = true;
844 mutex_unlock(&ifbdev->hpd_lock);
845
846 if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
814 drm_fb_helper_hotplug_event(&ifbdev->helper); 847 drm_fb_helper_hotplug_event(&ifbdev->helper);
815} 848}
816 849
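
The fbdev change defers hotplug handling while the console is suspended: a mutex-protected pair of flags records that an event arrived, and the resume path replays it with a full connector reprobe. A minimal sketch of that defer-and-replay pattern; the demo_* names are invented and the real driver keys the suspended flag off FBINFO_STATE_SUSPENDED:

#include <linux/mutex.h>
#include <linux/types.h>

struct demo_hpd {
        struct mutex lock;
        bool suspended;
        bool waiting;
};

static void demo_handle_hotplug(void)
{
        /* reprobe connectors, update the framebuffer console, etc. */
}

static void demo_hpd_event(struct demo_hpd *hpd)
{
        bool handle;

        mutex_lock(&hpd->lock);
        handle = !hpd->suspended;
        hpd->waiting = true;            /* remember the event either way */
        mutex_unlock(&hpd->lock);

        if (handle)
                demo_handle_hotplug();
}

static void demo_hpd_resume(struct demo_hpd *hpd)
{
        bool replay;

        mutex_lock(&hpd->lock);
        hpd->suspended = false;
        replay = hpd->waiting;
        hpd->waiting = false;
        mutex_unlock(&hpd->lock);

        if (replay)
                demo_handle_hotplug();  /* event missed while suspended */
}
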
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b8f106d9ecf8..3ac20153705a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -55,7 +55,12 @@
55struct opregion_header { 55struct opregion_header {
56 u8 signature[16]; 56 u8 signature[16];
57 u32 size; 57 u32 size;
58 u32 opregion_ver; 58 struct {
59 u8 rsvd;
60 u8 revision;
61 u8 minor;
62 u8 major;
63 } __packed over;
59 u8 bios_ver[32]; 64 u8 bios_ver[32];
60 u8 vbios_ver[16]; 65 u8 vbios_ver[16];
61 u8 driver_ver[16]; 66 u8 driver_ver[16];
@@ -119,7 +124,8 @@ struct opregion_asle {
119 u64 fdss; 124 u64 fdss;
120 u32 fdsp; 125 u32 fdsp;
121 u32 stat; 126 u32 stat;
122 u64 rvda; /* Physical address of raw vbt data */ 127 u64 rvda; /* Physical (2.0) or relative from opregion (2.1+)
128 * address of raw VBT data. */
123 u32 rvds; /* Size of raw vbt data */ 129 u32 rvds; /* Size of raw vbt data */
124 u8 rsvd[58]; 130 u8 rsvd[58];
125} __packed; 131} __packed;
@@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
925 opregion->header = base; 931 opregion->header = base;
926 opregion->lid_state = base + ACPI_CLID; 932 opregion->lid_state = base + ACPI_CLID;
927 933
934 DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
935 opregion->header->over.major,
936 opregion->header->over.minor,
937 opregion->header->over.revision);
938
928 mboxes = opregion->header->mboxes; 939 mboxes = opregion->header->mboxes;
929 if (mboxes & MBOX_ACPI) { 940 if (mboxes & MBOX_ACPI) {
930 DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); 941 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
@@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
953 if (dmi_check_system(intel_no_opregion_vbt)) 964 if (dmi_check_system(intel_no_opregion_vbt))
954 goto out; 965 goto out;
955 966
956 if (opregion->header->opregion_ver >= 2 && opregion->asle && 967 if (opregion->header->over.major >= 2 && opregion->asle &&
957 opregion->asle->rvda && opregion->asle->rvds) { 968 opregion->asle->rvda && opregion->asle->rvds) {
958 opregion->rvda = memremap(opregion->asle->rvda, 969 resource_size_t rvda = opregion->asle->rvda;
959 opregion->asle->rvds, 970
971 /*
972 * opregion 2.0: rvda is the physical VBT address.
973 *
974 * opregion 2.1+: rvda is unsigned, relative offset from
975 * opregion base, and should never point within opregion.
976 */
977 if (opregion->header->over.major > 2 ||
978 opregion->header->over.minor >= 1) {
979 WARN_ON(rvda < OPREGION_SIZE);
980
981 rvda += asls;
982 }
983
984 opregion->rvda = memremap(rvda, opregion->asle->rvds,
960 MEMREMAP_WB); 985 MEMREMAP_WB);
986
961 vbt = opregion->rvda; 987 vbt = opregion->rvda;
962 vbt_size = opregion->asle->rvds; 988 vbt_size = opregion->asle->rvds;
963 if (intel_bios_is_valid_vbt(vbt, vbt_size)) { 989 if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
@@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
967 goto out; 993 goto out;
968 } else { 994 } else {
969 DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); 995 DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
996 memunmap(opregion->rvda);
997 opregion->rvda = NULL;
970 } 998 }
971 } 999 }
972 1000
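
With OpRegion 2.1+ the RVDA field changes meaning from an absolute physical address to an offset from the OpRegion base, so the driver must branch on the parsed version before calling memremap(); the patch also unmaps the region again when the VBT inside it fails validation. A rough sketch of the address computation only, where asls stands in for the OpRegion base address and the helper name is invented:

#include <linux/types.h>

/*
 * Rough sketch, not the driver's exact code: compute the address to map
 * for the raw VBT, given the OpRegion version parsed from the header.
 */
static resource_size_t demo_vbt_address(u8 major, u8 minor,
                                        resource_size_t asls, u64 rvda)
{
        /* 2.0: rvda is already a physical address. */
        if (major == 2 && minor == 0)
                return rvda;

        /* 2.1+: rvda is an unsigned offset from the OpRegion base. */
        return asls + rvda;
}
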
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 72edaa7ff411..a1a7cc29fdd1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -415,16 +415,17 @@ struct intel_engine_cs {
415 /** 415 /**
416 * @enable_count: Reference count for the enabled samplers. 416 * @enable_count: Reference count for the enabled samplers.
417 * 417 *
418 * Index number corresponds to the bit number from @enable. 418 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
419 */ 419 */
420 unsigned int enable_count[I915_PMU_SAMPLE_BITS]; 420 unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
421 /** 421 /**
422 * @sample: Counter values for sampling events. 422 * @sample: Counter values for sampling events.
423 * 423 *
424 * Our internal timer stores the current counters in this field. 424 * Our internal timer stores the current counters in this field.
425 *
426 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
425 */ 427 */
426#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) 428 struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
427 struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
428 } pmu; 429 } pmu;
429 430
430 /* 431 /*
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 2c5bbe317353..e31e263cf86b 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
643 int bus_format; 643 int bus_format;
644 644
645 ret = of_property_read_u32(child, "reg", &i); 645 ret = of_property_read_u32(child, "reg", &i);
646 if (ret || i < 0 || i > 1) 646 if (ret || i < 0 || i > 1) {
647 return -EINVAL; 647 ret = -EINVAL;
648 goto free_child;
649 }
648 650
649 if (!of_device_is_available(child)) 651 if (!of_device_is_available(child))
650 continue; 652 continue;
@@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
657 channel = &imx_ldb->channel[i]; 659 channel = &imx_ldb->channel[i];
658 channel->ldb = imx_ldb; 660 channel->ldb = imx_ldb;
659 channel->chno = i; 661 channel->chno = i;
660 channel->child = child;
661 662
662 /* 663 /*
663 * The output port is port@4 with an external 4-port mux or 664 * The output port is port@4 with an external 4-port mux or
@@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
667 imx_ldb->lvds_mux ? 4 : 2, 0, 668 imx_ldb->lvds_mux ? 4 : 2, 0,
668 &channel->panel, &channel->bridge); 669 &channel->panel, &channel->bridge);
669 if (ret && ret != -ENODEV) 670 if (ret && ret != -ENODEV)
670 return ret; 671 goto free_child;
671 672
672 /* panel ddc only if there is no bridge */ 673 /* panel ddc only if there is no bridge */
673 if (!channel->bridge) { 674 if (!channel->bridge) {
674 ret = imx_ldb_panel_ddc(dev, channel, child); 675 ret = imx_ldb_panel_ddc(dev, channel, child);
675 if (ret) 676 if (ret)
676 return ret; 677 goto free_child;
677 } 678 }
678 679
679 bus_format = of_get_bus_format(dev, child); 680 bus_format = of_get_bus_format(dev, child);
@@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
689 if (bus_format < 0) { 690 if (bus_format < 0) {
690 dev_err(dev, "could not determine data mapping: %d\n", 691 dev_err(dev, "could not determine data mapping: %d\n",
691 bus_format); 692 bus_format);
692 return bus_format; 693 ret = bus_format;
694 goto free_child;
693 } 695 }
694 channel->bus_format = bus_format; 696 channel->bus_format = bus_format;
697 channel->child = child;
695 698
696 ret = imx_ldb_register(drm, channel); 699 ret = imx_ldb_register(drm, channel);
697 if (ret) 700 if (ret) {
698 return ret; 701 channel->child = NULL;
702 goto free_child;
703 }
699 } 704 }
700 705
701 dev_set_drvdata(dev, imx_ldb); 706 dev_set_drvdata(dev, imx_ldb);
702 707
703 return 0; 708 return 0;
709
710free_child:
711 of_node_put(child);
712 return ret;
704} 713}
705 714
706static void imx_ldb_unbind(struct device *dev, struct device *master, 715static void imx_ldb_unbind(struct device *dev, struct device *master,
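
The imx-ldb fix addresses a common leak: for_each_child_of_node() takes a reference on each child it yields and drops it on the next iteration, so any early return from inside the loop must call of_node_put() itself. A compact sketch of the corrected shape, with demo names and a stubbed-out per-child step:

#include <linux/of.h>

static int demo_setup_child(struct device_node *child)
{
        return 0;       /* stand-in for the real per-child setup */
}

static int demo_bind_children(struct device_node *parent)
{
        struct device_node *child;
        int ret;

        for_each_child_of_node(parent, child) {
                ret = demo_setup_child(child);
                if (ret)
                        goto err_put;
        }
        return 0;

err_put:
        /* the loop would have dropped this reference on the next iteration */
        of_node_put(child);
        return ret;
}
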
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index c390924de93d..21e964f6ab5c 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
370 if (ret) 370 if (ret)
371 return ret; 371 return ret;
372 372
373 /* CRTC should be enabled */ 373 /* nothing to check when disabling or disabled */
374 if (!crtc_state->enable) 374 if (!crtc_state->enable)
375 return -EINVAL; 375 return 0;
376 376
377 switch (plane->type) { 377 switch (plane->type) {
378 case DRM_PLANE_TYPE_PRIMARY: 378 case DRM_PLANE_TYPE_PRIMARY:
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dec1e081f529..6a8fb6fd183c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
172 } 172 }
173 173
174 if (radeon_is_px(dev)) { 174 if (radeon_is_px(dev)) {
175 dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
175 pm_runtime_use_autosuspend(dev->dev); 176 pm_runtime_use_autosuspend(dev->dev);
176 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 177 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
177 pm_runtime_set_active(dev->dev); 178 pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 4463d3826ecb..e2942c9a11a7 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
440 440
441 while ((entity->dependency = 441 while ((entity->dependency =
442 sched->ops->dependency(sched_job, entity))) { 442 sched->ops->dependency(sched_job, entity))) {
443 trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
443 444
444 if (drm_sched_entity_add_dependency_cb(entity)) { 445 if (drm_sched_entity_add_dependency_cb(entity))
445
446 trace_drm_sched_job_wait_dep(sched_job,
447 entity->dependency);
448 return NULL; 446 return NULL;
449 }
450 } 447 }
451 448
452 /* skip jobs from entity that marked guilty */ 449 /* skip jobs from entity that marked guilty */
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
index 9d9e8146db90..d7b409a3c0f8 100644
--- a/drivers/gpu/drm/vkms/vkms_crc.c
+++ b/drivers/gpu/drm/vkms/vkms_crc.c
@@ -1,4 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2
2#include "vkms_drv.h" 3#include "vkms_drv.h"
3#include <linux/crc32.h> 4#include <linux/crc32.h>
4#include <drm/drm_atomic.h> 5#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 177bbcb38306..eb56ee893761 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include "vkms_drv.h" 3#include "vkms_drv.h"
10#include <drm/drm_atomic_helper.h> 4#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 83087877565c..7dcbecb5fac2 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -1,9 +1,4 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 */
7 2
8/** 3/**
9 * DOC: vkms (Virtual Kernel Modesetting) 4 * DOC: vkms (Virtual Kernel Modesetting)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index e4469cd3d254..81f1cfbeb936 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -1,3 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2
1#ifndef _VKMS_DRV_H_ 3#ifndef _VKMS_DRV_H_
2#define _VKMS_DRV_H_ 4#define _VKMS_DRV_H_
3 5
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 80311daed47a..138b0bb325cf 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include <linux/shmem_fs.h> 3#include <linux/shmem_fs.h>
10 4
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 271a0eb9042c..4173e4f48334 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include "vkms_drv.h" 3#include "vkms_drv.h"
10#include <drm/drm_crtc_helper.h> 4#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 418817600ad1..0e67d2d42f0c 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include "vkms_drv.h" 3#include "vkms_drv.h"
10#include <drm/drm_plane_helper.h> 4#include <drm/drm_plane_helper.h>
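
The vkms hunks above swap the boilerplate GPL notices for SPDX identifiers and move the licence to GPL-2.0+. Per Documentation/process/license-rules.rst, the identifier goes on the first line of the file, with C++-style comments in .c sources and a block comment in headers (which can be pulled into contexts where // is not accepted). The two forms, for reference:

// SPDX-License-Identifier: GPL-2.0+
/* ... first line of a .c source file ... */

/* SPDX-License-Identifier: GPL-2.0+ */
/* ... first line of a .h header file ... */
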
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 474b00e19697..0a7d4395d427 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = {
898 .cpmem_ofs = 0x1f000000, 898 .cpmem_ofs = 0x1f000000,
899 .srm_ofs = 0x1f040000, 899 .srm_ofs = 0x1f040000,
900 .tpm_ofs = 0x1f060000, 900 .tpm_ofs = 0x1f060000,
901 .csi0_ofs = 0x1f030000, 901 .csi0_ofs = 0x1e030000,
902 .csi1_ofs = 0x1f038000, 902 .csi1_ofs = 0x1e038000,
903 .ic_ofs = 0x1e020000, 903 .ic_ofs = 0x1e020000,
904 .disp0_ofs = 0x1e040000, 904 .disp0_ofs = 0x1e040000,
905 .disp1_ofs = 0x1e048000, 905 .disp1_ofs = 0x1e048000,
@@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = {
914 .cpmem_ofs = 0x07000000, 914 .cpmem_ofs = 0x07000000,
915 .srm_ofs = 0x07040000, 915 .srm_ofs = 0x07040000,
916 .tpm_ofs = 0x07060000, 916 .tpm_ofs = 0x07060000,
917 .csi0_ofs = 0x07030000, 917 .csi0_ofs = 0x06030000,
918 .csi1_ofs = 0x07038000, 918 .csi1_ofs = 0x06038000,
919 .ic_ofs = 0x06020000, 919 .ic_ofs = 0x06020000,
920 .disp0_ofs = 0x06040000, 920 .disp0_ofs = 0x06040000,
921 .disp1_ofs = 0x06048000, 921 .disp1_ofs = 0x06048000,
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 2f8db9d62551..4a28f3fbb0a2 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -106,6 +106,7 @@ struct ipu_pre {
106 void *buffer_virt; 106 void *buffer_virt;
107 bool in_use; 107 bool in_use;
108 unsigned int safe_window_end; 108 unsigned int safe_window_end;
109 unsigned int last_bufaddr;
109}; 110};
110 111
111static DEFINE_MUTEX(ipu_pre_list_mutex); 112static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
185 186
186 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); 187 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
187 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 188 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
189 pre->last_bufaddr = bufaddr;
188 190
189 val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | 191 val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
190 IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | 192 IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
@@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
242 unsigned short current_yblock; 244 unsigned short current_yblock;
243 u32 val; 245 u32 val;
244 246
247 if (bufaddr == pre->last_bufaddr)
248 return;
249
245 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 250 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
251 pre->last_bufaddr = bufaddr;
246 252
247 do { 253 do {
248 if (time_after(jiffies, timeout)) { 254 if (time_after(jiffies, timeout)) {
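
ipu_pre_update() now remembers the last buffer address written to IPU_PRE_NEXT_BUF and returns early when asked to program the same address again, avoiding both the redundant register write and the safe-window busy-wait that follows it. The caching idiom in isolation, with a made-up device structure and register offset:

#include <linux/io.h>

struct demo_scanout {
        void __iomem *regs;
        unsigned int last_bufaddr;
};

#define DEMO_NEXT_BUF   0x10    /* made-up register offset */

static void demo_update(struct demo_scanout *s, unsigned int bufaddr)
{
        /* Nothing to do if the hardware already points at this buffer. */
        if (bufaddr == s->last_bufaddr)
                return;

        writel(bufaddr, s->regs + DEMO_NEXT_BUF);
        s->last_bufaddr = bufaddr;
}
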
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 4adec4ab7d06..59ee01f3d022 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -3594,7 +3594,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
3594 fan5pin |= cr1b & BIT(5); 3594 fan5pin |= cr1b & BIT(5);
3595 fan5pin |= creb & BIT(5); 3595 fan5pin |= creb & BIT(5);
3596 3596
3597 fan6pin = creb & BIT(3); 3597 fan6pin = !dsw_en && (cr2d & BIT(1));
3598 fan6pin |= creb & BIT(3);
3598 3599
3599 pwm5pin |= cr2d & BIT(7); 3600 pwm5pin |= cr2d & BIT(7);
3600 pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); 3601 pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0));
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index ec6e69aa3a8e..d2fbb4bb4a43 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
183 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); 183 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
184} 184}
185 185
186static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev)
187{
188 i2c_dev->curr_msg = NULL;
189 i2c_dev->num_msgs = 0;
190
191 i2c_dev->msg_buf = NULL;
192 i2c_dev->msg_buf_remaining = 0;
193}
194
186/* 195/*
187 * Note about I2C_C_CLEAR on error: 196 * Note about I2C_C_CLEAR on error:
188 * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in 197 * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
@@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
283 292
284 time_left = wait_for_completion_timeout(&i2c_dev->completion, 293 time_left = wait_for_completion_timeout(&i2c_dev->completion,
285 adap->timeout); 294 adap->timeout);
295
296 bcm2835_i2c_finish_transfer(i2c_dev);
297
286 if (!time_left) { 298 if (!time_left) {
287 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 299 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
288 BCM2835_I2C_C_CLEAR); 300 BCM2835_I2C_C_CLEAR);
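The bcm2835 hunk adds bcm2835_i2c_finish_transfer() and calls it right after the completion wait, so the shared message pointers are cleared before the timeout path touches the controller and a late interrupt can no longer walk a finished transfer. A reduced sketch of that ordering follows; the context structure and return convention are made up for the example.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/types.h>

/* Illustrative transfer state shared with an interrupt handler. */
struct demo_xfer {
	struct completion done;
	struct i2c_msg *curr_msg;
	u8 *buf;
	size_t remaining;
};

static int demo_wait_for_xfer(struct demo_xfer *x, unsigned long timeout)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&x->done, timeout);

	/* Clear the shared state first: a straggling IRQ then finds
	 * nothing to operate on instead of a stale message. */
	x->curr_msg = NULL;
	x->buf = NULL;
	x->remaining = 0;

	return time_left ? 0 : -ETIMEDOUT;
}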
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index b13605718291..d917cefc5a19 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
382 * Check for the message size against FIFO depth and set the 382 * Check for the message size against FIFO depth and set the
383 * 'hold bus' bit if it is greater than FIFO depth. 383 * 'hold bus' bit if it is greater than FIFO depth.
384 */ 384 */
385 if (id->recv_count > CDNS_I2C_FIFO_DEPTH) 385 if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
386 ctrl_reg |= CDNS_I2C_CR_HOLD; 386 ctrl_reg |= CDNS_I2C_CR_HOLD;
387 else
388 ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
387 389
388 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 390 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
389 391
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
440 * Check for the message size against FIFO depth and set the 442 * Check for the message size against FIFO depth and set the
441 * 'hold bus' bit if it is greater than FIFO depth. 443 * 'hold bus' bit if it is greater than FIFO depth.
442 */ 444 */
443 if (id->send_count > CDNS_I2C_FIFO_DEPTH) 445 if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
444 ctrl_reg |= CDNS_I2C_CR_HOLD; 446 ctrl_reg |= CDNS_I2C_CR_HOLD;
447 else
448 ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
449
445 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 450 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
446 451
447 /* Clear the interrupts in interrupt status register. */ 452 /* Clear the interrupts in interrupt status register. */
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index c13c0ba30f63..d499cd61c0e8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
783static int c4iw_rdev_open(struct c4iw_rdev *rdev) 783static int c4iw_rdev_open(struct c4iw_rdev *rdev)
784{ 784{
785 int err; 785 int err;
786 unsigned int factor;
786 787
787 c4iw_init_dev_ucontext(rdev, &rdev->uctx); 788 c4iw_init_dev_ucontext(rdev, &rdev->uctx);
788 789
@@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
806 return -EINVAL; 807 return -EINVAL;
807 } 808 }
808 809
809 rdev->qpmask = rdev->lldi.udb_density - 1; 810 /* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
810 rdev->cqmask = rdev->lldi.ucq_density - 1; 811 if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
812 pr_err("%s: unsupported sge host page size %u\n",
813 pci_name(rdev->lldi.pdev),
814 rdev->lldi.sge_host_page_size);
815 return -EINVAL;
816 }
817
818 factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
819 rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
820 rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
821
811 pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n", 822 pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
812 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, 823 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
813 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), 824 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 31d91538bbf4..694324b37480 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
3032{ 3032{
3033 struct srp_target_port *target = host_to_target(scmnd->device->host); 3033 struct srp_target_port *target = host_to_target(scmnd->device->host);
3034 struct srp_rdma_ch *ch; 3034 struct srp_rdma_ch *ch;
3035 int i, j;
3036 u8 status; 3035 u8 status;
3037 3036
3038 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 3037 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
3044 if (status) 3043 if (status)
3045 return FAILED; 3044 return FAILED;
3046 3045
3047 for (i = 0; i < target->ch_count; i++) {
3048 ch = &target->ch[i];
3049 for (j = 0; j < target->req_ring_size; ++j) {
3050 struct srp_request *req = &ch->req_ring[j];
3051
3052 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
3053 }
3054 }
3055
3056 return SUCCESS; 3046 return SUCCESS;
3057} 3047}
3058 3048
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4713957b0cbb..a878351f1643 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
420 420
421config KEYBOARD_SNVS_PWRKEY 421config KEYBOARD_SNVS_PWRKEY
422 tristate "IMX SNVS Power Key Driver" 422 tristate "IMX SNVS Power Key Driver"
423 depends on SOC_IMX6SX 423 depends on SOC_IMX6SX || SOC_IMX7D
424 depends on OF 424 depends on OF
425 help 425 help
426 This is the snvs powerkey driver for the Freescale i.MX application 426 This is the snvs powerkey driver for the Freescale i.MX application
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 312916f99597..73686c2460ce 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -75,9 +75,7 @@
75struct cap11xx_led { 75struct cap11xx_led {
76 struct cap11xx_priv *priv; 76 struct cap11xx_priv *priv;
77 struct led_classdev cdev; 77 struct led_classdev cdev;
78 struct work_struct work;
79 u32 reg; 78 u32 reg;
80 enum led_brightness new_brightness;
81}; 79};
82#endif 80#endif
83 81
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev)
233} 231}
234 232
235#ifdef CONFIG_LEDS_CLASS 233#ifdef CONFIG_LEDS_CLASS
236static void cap11xx_led_work(struct work_struct *work) 234static int cap11xx_led_set(struct led_classdev *cdev,
235 enum led_brightness value)
237{ 236{
238 struct cap11xx_led *led = container_of(work, struct cap11xx_led, work); 237 struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
239 struct cap11xx_priv *priv = led->priv; 238 struct cap11xx_priv *priv = led->priv;
240 int value = led->new_brightness;
241 239
242 /* 240 /*
243 * All LEDs share the same duty cycle as this is a HW limitation. 241 * All LEDs share the same duty cycle as this is a HW
244 * Brightness levels per LED are either 0 (OFF) and 1 (ON). 242 * limitation. Brightness levels per LED are either
243 * 0 (OFF) and 1 (ON).
245 */ 244 */
246 regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL, 245 return regmap_update_bits(priv->regmap,
247 BIT(led->reg), value ? BIT(led->reg) : 0); 246 CAP11XX_REG_LED_OUTPUT_CONTROL,
248} 247 BIT(led->reg),
249 248 value ? BIT(led->reg) : 0);
250static void cap11xx_led_set(struct led_classdev *cdev,
251 enum led_brightness value)
252{
253 struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
254
255 if (led->new_brightness == value)
256 return;
257
258 led->new_brightness = value;
259 schedule_work(&led->work);
260} 249}
261 250
262static int cap11xx_init_leds(struct device *dev, 251static int cap11xx_init_leds(struct device *dev,
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev,
299 led->cdev.default_trigger = 288 led->cdev.default_trigger =
300 of_get_property(child, "linux,default-trigger", NULL); 289 of_get_property(child, "linux,default-trigger", NULL);
301 led->cdev.flags = 0; 290 led->cdev.flags = 0;
302 led->cdev.brightness_set = cap11xx_led_set; 291 led->cdev.brightness_set_blocking = cap11xx_led_set;
303 led->cdev.max_brightness = 1; 292 led->cdev.max_brightness = 1;
304 led->cdev.brightness = LED_OFF; 293 led->cdev.brightness = LED_OFF;
305 294
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev,
312 led->reg = reg; 301 led->reg = reg;
313 led->priv = priv; 302 led->priv = priv;
314 303
315 INIT_WORK(&led->work, cap11xx_led_work);
316
317 error = devm_led_classdev_register(dev, &led->cdev); 304 error = devm_led_classdev_register(dev, &led->cdev);
318 if (error) { 305 if (error) {
319 of_node_put(child); 306 of_node_put(child);
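This cap11xx conversion (like the qt2160 and apanel ones further down) drops the driver-private work item and registers the LED with brightness_set_blocking, letting the LED core supply the sleeping context. A minimal sketch of that callback style is below; the register address, names and struct layout are illustrative, not the driver's.

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/regmap.h>

struct demo_led {
	struct led_classdev cdev;
	struct regmap *regmap;
	unsigned int reg;	/* bit index controlling this LED */
};

/* May sleep: the LED core invokes it from process context. */
static int demo_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	struct demo_led *led = container_of(cdev, struct demo_led, cdev);

	return regmap_update_bits(led->regmap, 0x74 /* assumed register */,
				  BIT(led->reg), value ? BIT(led->reg) : 0);
}

static int demo_led_register(struct device *dev, struct demo_led *led)
{
	led->cdev.name = "demo:green";
	led->cdev.max_brightness = 1;
	led->cdev.brightness_set_blocking = demo_led_set;

	return devm_led_classdev_register(dev, &led->cdev);
}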
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 403452ef00e6..3d1cb7bf5e35 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
222 keypad->stopped = true; 222 keypad->stopped = true;
223 spin_unlock_irq(&keypad->lock); 223 spin_unlock_irq(&keypad->lock);
224 224
225 flush_work(&keypad->work.work); 225 flush_delayed_work(&keypad->work);
226 /* 226 /*
227 * matrix_keypad_scan() will leave IRQs enabled; 227 * matrix_keypad_scan() will leave IRQs enabled;
228 * we should disable them now. 228 * we should disable them now.
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 43b86482dda0..d466bc07aebb 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = {
58struct qt2160_led { 58struct qt2160_led {
59 struct qt2160_data *qt2160; 59 struct qt2160_data *qt2160;
60 struct led_classdev cdev; 60 struct led_classdev cdev;
61 struct work_struct work;
62 char name[32]; 61 char name[32];
63 int id; 62 int id;
64 enum led_brightness new_brightness; 63 enum led_brightness brightness;
65}; 64};
66#endif 65#endif
67 66
@@ -74,7 +73,6 @@ struct qt2160_data {
74 u16 key_matrix; 73 u16 key_matrix;
75#ifdef CONFIG_LEDS_CLASS 74#ifdef CONFIG_LEDS_CLASS
76 struct qt2160_led leds[QT2160_NUM_LEDS_X]; 75 struct qt2160_led leds[QT2160_NUM_LEDS_X];
77 struct mutex led_lock;
78#endif 76#endif
79}; 77};
80 78
@@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
83 81
84#ifdef CONFIG_LEDS_CLASS 82#ifdef CONFIG_LEDS_CLASS
85 83
86static void qt2160_led_work(struct work_struct *work) 84static int qt2160_led_set(struct led_classdev *cdev,
85 enum led_brightness value)
87{ 86{
88 struct qt2160_led *led = container_of(work, struct qt2160_led, work); 87 struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
89 struct qt2160_data *qt2160 = led->qt2160; 88 struct qt2160_data *qt2160 = led->qt2160;
90 struct i2c_client *client = qt2160->client; 89 struct i2c_client *client = qt2160->client;
91 int value = led->new_brightness;
92 u32 drive, pwmen; 90 u32 drive, pwmen;
93 91
94 mutex_lock(&qt2160->led_lock); 92 if (value != led->brightness) {
95 93 drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
96 drive = qt2160_read(client, QT2160_CMD_DRIVE_X); 94 pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
97 pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); 95 if (value != LED_OFF) {
98 if (value != LED_OFF) { 96 drive |= BIT(led->id);
99 drive |= (1 << led->id); 97 pwmen |= BIT(led->id);
100 pwmen |= (1 << led->id);
101
102 } else {
103 drive &= ~(1 << led->id);
104 pwmen &= ~(1 << led->id);
105 }
106 qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
107 qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
108 98
109 /* 99 } else {
110 * Changing this register will change the brightness 100 drive &= ~BIT(led->id);
111 * of every LED in the qt2160. It's a HW limitation. 101 pwmen &= ~BIT(led->id);
112 */ 102 }
113 if (value != LED_OFF) 103 qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
114 qt2160_write(client, QT2160_CMD_PWM_DUTY, value); 104 qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
115 105
116 mutex_unlock(&qt2160->led_lock); 106 /*
117} 107 * Changing this register will change the brightness
108 * of every LED in the qt2160. It's a HW limitation.
109 */
110 if (value != LED_OFF)
111 qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
118 112
119static void qt2160_led_set(struct led_classdev *cdev, 113 led->brightness = value;
120 enum led_brightness value) 114 }
121{
122 struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
123 115
124 led->new_brightness = value; 116 return 0;
125 schedule_work(&led->work);
126} 117}
127 118
128#endif /* CONFIG_LEDS_CLASS */ 119#endif /* CONFIG_LEDS_CLASS */
@@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
293 int ret; 284 int ret;
294 int i; 285 int i;
295 286
296 mutex_init(&qt2160->led_lock);
297
298 for (i = 0; i < QT2160_NUM_LEDS_X; i++) { 287 for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
299 struct qt2160_led *led = &qt2160->leds[i]; 288 struct qt2160_led *led = &qt2160->leds[i];
300 289
301 snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); 290 snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
302 led->cdev.name = led->name; 291 led->cdev.name = led->name;
303 led->cdev.brightness_set = qt2160_led_set; 292 led->cdev.brightness_set_blocking = qt2160_led_set;
304 led->cdev.brightness = LED_OFF; 293 led->cdev.brightness = LED_OFF;
305 led->id = i; 294 led->id = i;
306 led->qt2160 = qt2160; 295 led->qt2160 = qt2160;
307 296
308 INIT_WORK(&led->work, qt2160_led_work);
309
310 ret = led_classdev_register(&client->dev, &led->cdev); 297 ret = led_classdev_register(&client->dev, &led->cdev);
311 if (ret < 0) 298 if (ret < 0)
312 return ret; 299 return ret;
@@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160)
324{ 311{
325 int i; 312 int i;
326 313
327 for (i = 0; i < QT2160_NUM_LEDS_X; i++) { 314 for (i = 0; i < QT2160_NUM_LEDS_X; i++)
328 led_classdev_unregister(&qt2160->leds[i].cdev); 315 led_classdev_unregister(&qt2160->leds[i].cdev);
329 cancel_work_sync(&qt2160->leds[i].work);
330 }
331} 316}
332 317
333#else 318#else
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index babcfb165e4f..3b85631fde91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
153 153
154 input_dev->id.bustype = BUS_HOST; 154 input_dev->id.bustype = BUS_HOST;
155 155
156 keypad_data->input_dev = input_dev;
157
156 error = keypad_matrix_key_parse_dt(keypad_data); 158 error = keypad_matrix_key_parse_dt(keypad_data);
157 if (error) 159 if (error)
158 return error; 160 return error;
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
168 170
169 input_set_drvdata(input_dev, keypad_data); 171 input_set_drvdata(input_dev, keypad_data);
170 172
171 keypad_data->input_dev = input_dev;
172
173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
174 keypad_data->base = devm_ioremap_resource(&pdev->dev, res); 174 keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
175 if (IS_ERR(keypad_data->base)) 175 if (IS_ERR(keypad_data->base))
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index 094bddf56755..c1e66f45d552 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -22,7 +22,6 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/input-polldev.h> 23#include <linux/input-polldev.h>
24#include <linux/i2c.h> 24#include <linux/i2c.h>
25#include <linux/workqueue.h>
26#include <linux/leds.h> 25#include <linux/leds.h>
27 26
28#define APANEL_NAME "Fujitsu Application Panel" 27#define APANEL_NAME "Fujitsu Application Panel"
@@ -59,8 +58,6 @@ struct apanel {
59 struct i2c_client *client; 58 struct i2c_client *client;
60 unsigned short keymap[MAX_PANEL_KEYS]; 59 unsigned short keymap[MAX_PANEL_KEYS];
61 u16 nkeys; 60 u16 nkeys;
62 u16 led_bits;
63 struct work_struct led_work;
64 struct led_classdev mail_led; 61 struct led_classdev mail_led;
65}; 62};
66 63
@@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev)
109 report_key(idev, ap->keymap[i]); 106 report_key(idev, ap->keymap[i]);
110} 107}
111 108
112/* Track state changes of LED */ 109static int mail_led_set(struct led_classdev *led,
113static void led_update(struct work_struct *work)
114{
115 struct apanel *ap = container_of(work, struct apanel, led_work);
116
117 i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits);
118}
119
120static void mail_led_set(struct led_classdev *led,
121 enum led_brightness value) 110 enum led_brightness value)
122{ 111{
123 struct apanel *ap = container_of(led, struct apanel, mail_led); 112 struct apanel *ap = container_of(led, struct apanel, mail_led);
113 u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000;
124 114
125 if (value != LED_OFF) 115 return i2c_smbus_write_word_data(ap->client, 0x10, led_bits);
126 ap->led_bits |= 0x8000;
127 else
128 ap->led_bits &= ~0x8000;
129
130 schedule_work(&ap->led_work);
131} 116}
132 117
133static int apanel_remove(struct i2c_client *client) 118static int apanel_remove(struct i2c_client *client)
@@ -179,7 +164,7 @@ static struct apanel apanel = {
179 }, 164 },
180 .mail_led = { 165 .mail_led = {
181 .name = "mail:blue", 166 .name = "mail:blue",
182 .brightness_set = mail_led_set, 167 .brightness_set_blocking = mail_led_set,
183 }, 168 },
184}; 169};
185 170
@@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client,
235 if (err) 220 if (err)
236 goto out3; 221 goto out3;
237 222
238 INIT_WORK(&ap->led_work, led_update);
239 if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { 223 if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
240 err = led_classdev_register(&client->dev, &ap->mail_led); 224 err = led_classdev_register(&client->dev, &ap->mail_led);
241 if (err) 225 if (err)
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 1efcfdf9f8a8..dd9dd4e40827 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
481 idev->close = bma150_irq_close; 481 idev->close = bma150_irq_close;
482 input_set_drvdata(idev, bma150); 482 input_set_drvdata(idev, bma150);
483 483
484 bma150->input = idev;
485
484 error = input_register_device(idev); 486 error = input_register_device(idev);
485 if (error) { 487 if (error) {
486 input_free_device(idev); 488 input_free_device(idev);
487 return error; 489 return error;
488 } 490 }
489 491
490 bma150->input = idev;
491 return 0; 492 return 0;
492} 493}
493 494
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
510 511
511 bma150_init_input_device(bma150, ipoll_dev->input); 512 bma150_init_input_device(bma150, ipoll_dev->input);
512 513
514 bma150->input_polled = ipoll_dev;
515 bma150->input = ipoll_dev->input;
516
513 error = input_register_polled_device(ipoll_dev); 517 error = input_register_polled_device(ipoll_dev);
514 if (error) { 518 if (error) {
515 input_free_polled_device(ipoll_dev); 519 input_free_polled_device(ipoll_dev);
516 return error; 520 return error;
517 } 521 }
518 522
519 bma150->input_polled = ipoll_dev;
520 bma150->input = ipoll_dev->input;
521
522 return 0; 523 return 0;
523} 524}
524 525
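Both the st-keyscan and bma150 fixes above move the private input pointer assignment ahead of device registration, because the ->open() callback can run as soon as input_register_device() returns and may dereference that pointer. A minimal sketch of the safe ordering, with invented names:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/input.h>

struct demo_priv {
	struct input_dev *input;
};

static int demo_register_input(struct device *dev, struct demo_priv *priv)
{
	struct input_dev *idev;
	int error;

	idev = devm_input_allocate_device(dev);
	if (!idev)
		return -ENOMEM;

	idev->name = "demo-input";

	/* Publish the pointer first: open()/close() may run immediately
	 * after registration and expect it to be valid. */
	priv->input = idev;

	error = input_register_device(idev);
	if (error)
		return error;

	return 0;
}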
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index 55da191ae550..dbb6d9e1b947 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -34,6 +34,7 @@ struct pwm_vibrator {
34 struct work_struct play_work; 34 struct work_struct play_work;
35 u16 level; 35 u16 level;
36 u32 direction_duty_cycle; 36 u32 direction_duty_cycle;
37 bool vcc_on;
37}; 38};
38 39
39static int pwm_vibrator_start(struct pwm_vibrator *vibrator) 40static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
42 struct pwm_state state; 43 struct pwm_state state;
43 int err; 44 int err;
44 45
45 err = regulator_enable(vibrator->vcc); 46 if (!vibrator->vcc_on) {
46 if (err) { 47 err = regulator_enable(vibrator->vcc);
47 dev_err(pdev, "failed to enable regulator: %d", err); 48 if (err) {
48 return err; 49 dev_err(pdev, "failed to enable regulator: %d", err);
50 return err;
51 }
52 vibrator->vcc_on = true;
49 } 53 }
50 54
51 pwm_get_state(vibrator->pwm, &state); 55 pwm_get_state(vibrator->pwm, &state);
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
76 80
77static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) 81static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
78{ 82{
79 regulator_disable(vibrator->vcc);
80
81 if (vibrator->pwm_dir) 83 if (vibrator->pwm_dir)
82 pwm_disable(vibrator->pwm_dir); 84 pwm_disable(vibrator->pwm_dir);
83 pwm_disable(vibrator->pwm); 85 pwm_disable(vibrator->pwm);
86
87 if (vibrator->vcc_on) {
88 regulator_disable(vibrator->vcc);
89 vibrator->vcc_on = false;
90 }
84} 91}
85 92
86static void pwm_vibrator_play_work(struct work_struct *work) 93static void pwm_vibrator_play_work(struct work_struct *work)
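The pwm-vibra hunk adds a vcc_on flag so repeated start or stop calls keep the regulator enable count balanced (the regulator core warns on unbalanced disables). A short sketch of that guard around a hypothetical context structure:

#include <linux/regulator/consumer.h>
#include <linux/types.h>

struct vib_ctx {
	struct regulator *vcc;
	bool vcc_on;
};

static int vib_power_on(struct vib_ctx *ctx)
{
	int err;

	if (ctx->vcc_on)
		return 0;	/* already enabled; keep the refcount balanced */

	err = regulator_enable(ctx->vcc);
	if (err)
		return err;

	ctx->vcc_on = true;
	return 0;
}

static void vib_power_off(struct vib_ctx *ctx)
{
	if (!ctx->vcc_on)
		return;

	regulator_disable(ctx->vcc);
	ctx->vcc_on = false;
}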
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index f322a1768fbb..225ae6980182 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
1336static const struct acpi_device_id elan_acpi_id[] = { 1336static const struct acpi_device_id elan_acpi_id[] = {
1337 { "ELAN0000", 0 }, 1337 { "ELAN0000", 0 },
1338 { "ELAN0100", 0 }, 1338 { "ELAN0100", 0 },
1339 { "ELAN0501", 0 },
1340 { "ELAN0600", 0 }, 1339 { "ELAN0600", 0 },
1341 { "ELAN0602", 0 }, 1340 { "ELAN0602", 0 },
1342 { "ELAN0605", 0 }, 1341 { "ELAN0605", 0 },
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1346 { "ELAN060C", 0 }, 1345 { "ELAN060C", 0 },
1347 { "ELAN0611", 0 }, 1346 { "ELAN0611", 0 },
1348 { "ELAN0612", 0 }, 1347 { "ELAN0612", 0 },
1348 { "ELAN0617", 0 },
1349 { "ELAN0618", 0 }, 1349 { "ELAN0618", 0 },
1350 { "ELAN061C", 0 }, 1350 { "ELAN061C", 0 },
1351 { "ELAN061D", 0 }, 1351 { "ELAN061D", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 9fe075c137dc..a7f8b1614559 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1119 * Asus UX31 0x361f00 20, 15, 0e clickpad 1119 * Asus UX31 0x361f00 20, 15, 0e clickpad
1120 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1120 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1121 * Avatar AVIU-145A2 0x361f00 ? clickpad 1121 * Avatar AVIU-145A2 0x361f00 ? clickpad
1122 * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
1123 * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
1122 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1124 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1123 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons 1125 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
1124 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons 1126 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
1171 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), 1173 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
1172 }, 1174 },
1173 }, 1175 },
1176 {
1177 /* Fujitsu H780 also has a middle button */
1178 .matches = {
1179 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1180 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
1181 },
1182 },
1174#endif 1183#endif
1175 { } 1184 { }
1176}; 1185};
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index c62cceb97bb1..5e8d8384aa2a 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio)
76{ 76{
77 struct ps2_gpio_data *drvdata = serio->port_data; 77 struct ps2_gpio_data *drvdata = serio->port_data;
78 78
79 flush_delayed_work(&drvdata->tx_work);
79 disable_irq(drvdata->irq); 80 disable_irq(drvdata->irq);
80} 81}
81 82
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index d713271ebf7c..a64116586b4c 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan)
1396 1396
1397 /* Clear ring flush state */ 1397 /* Clear ring flush state */
1398 timeout = 1000; /* timeout of 1s */ 1398 timeout = 1000; /* timeout of 1s */
1399 writel_relaxed(0x0, ring + RING_CONTROL); 1399 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1400 do { 1400 do {
1401 if (!(readl_relaxed(ring + RING_FLUSH_DONE) & 1401 if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
1402 FLUSH_DONE_MASK)) 1402 FLUSH_DONE_MASK))
1403 break; 1403 break;
1404 mdelay(1); 1404 mdelay(1);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index c6a7d4582dc6..38d9df3fb199 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
310 310
311 return ret; 311 return ret;
312} 312}
313EXPORT_SYMBOL_GPL(mbox_flush);
313 314
314/** 315/**
315 * mbox_request_channel - Request a mailbox channel. 316 * mbox_request_channel - Request a mailbox channel.
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 47d4e0d30bf0..dd538e6b2748 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
932 if (IS_ERR(bip)) 932 if (IS_ERR(bip))
933 return PTR_ERR(bip); 933 return PTR_ERR(bip);
934 934
935 tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); 935 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
936 936
937 bip->bip_iter.bi_size = tag_len; 937 bip->bip_iter.bi_size = tag_len;
938 bip->bip_iter.bi_sector = io->cc->start + io->sector; 938 bip->bip_iter.bi_sector = io->cc->start + io->sector;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ca8af21bf644..e83b63608262 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -257,6 +257,7 @@ struct pool {
257 257
258 spinlock_t lock; 258 spinlock_t lock;
259 struct bio_list deferred_flush_bios; 259 struct bio_list deferred_flush_bios;
260 struct bio_list deferred_flush_completions;
260 struct list_head prepared_mappings; 261 struct list_head prepared_mappings;
261 struct list_head prepared_discards; 262 struct list_head prepared_discards;
262 struct list_head prepared_discards_pt2; 263 struct list_head prepared_discards_pt2;
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
956 mempool_free(m, &m->tc->pool->mapping_pool); 957 mempool_free(m, &m->tc->pool->mapping_pool);
957} 958}
958 959
960static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
961{
962 struct pool *pool = tc->pool;
963 unsigned long flags;
964
965 /*
966 * If the bio has the REQ_FUA flag set we must commit the metadata
967 * before signaling its completion.
968 */
969 if (!bio_triggers_commit(tc, bio)) {
970 bio_endio(bio);
971 return;
972 }
973
974 /*
975 * Complete bio with an error if earlier I/O caused changes to the
 976	 * metadata that can't be committed, e.g., due to I/O errors on the
977 * metadata device.
978 */
979 if (dm_thin_aborted_changes(tc->td)) {
980 bio_io_error(bio);
981 return;
982 }
983
984 /*
985 * Batch together any bios that trigger commits and then issue a
986 * single commit for them in process_deferred_bios().
987 */
988 spin_lock_irqsave(&pool->lock, flags);
989 bio_list_add(&pool->deferred_flush_completions, bio);
990 spin_unlock_irqrestore(&pool->lock, flags);
991}
992
959static void process_prepared_mapping(struct dm_thin_new_mapping *m) 993static void process_prepared_mapping(struct dm_thin_new_mapping *m)
960{ 994{
961 struct thin_c *tc = m->tc; 995 struct thin_c *tc = m->tc;
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
988 */ 1022 */
989 if (bio) { 1023 if (bio) {
990 inc_remap_and_issue_cell(tc, m->cell, m->data_block); 1024 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
991 bio_endio(bio); 1025 complete_overwrite_bio(tc, bio);
992 } else { 1026 } else {
993 inc_all_io_entry(tc->pool, m->cell->holder); 1027 inc_all_io_entry(tc->pool, m->cell->holder);
994 remap_and_issue(tc, m->cell->holder, m->data_block); 1028 remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
2317{ 2351{
2318 unsigned long flags; 2352 unsigned long flags;
2319 struct bio *bio; 2353 struct bio *bio;
2320 struct bio_list bios; 2354 struct bio_list bios, bio_completions;
2321 struct thin_c *tc; 2355 struct thin_c *tc;
2322 2356
2323 tc = get_first_thin(pool); 2357 tc = get_first_thin(pool);
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
2328 } 2362 }
2329 2363
2330 /* 2364 /*
2331 * If there are any deferred flush bios, we must commit 2365 * If there are any deferred flush bios, we must commit the metadata
2332 * the metadata before issuing them. 2366 * before issuing them or signaling their completion.
2333 */ 2367 */
2334 bio_list_init(&bios); 2368 bio_list_init(&bios);
2369 bio_list_init(&bio_completions);
2370
2335 spin_lock_irqsave(&pool->lock, flags); 2371 spin_lock_irqsave(&pool->lock, flags);
2336 bio_list_merge(&bios, &pool->deferred_flush_bios); 2372 bio_list_merge(&bios, &pool->deferred_flush_bios);
2337 bio_list_init(&pool->deferred_flush_bios); 2373 bio_list_init(&pool->deferred_flush_bios);
2374
2375 bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
2376 bio_list_init(&pool->deferred_flush_completions);
2338 spin_unlock_irqrestore(&pool->lock, flags); 2377 spin_unlock_irqrestore(&pool->lock, flags);
2339 2378
2340 if (bio_list_empty(&bios) && 2379 if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
2341 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) 2380 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
2342 return; 2381 return;
2343 2382
2344 if (commit(pool)) { 2383 if (commit(pool)) {
2384 bio_list_merge(&bios, &bio_completions);
2385
2345 while ((bio = bio_list_pop(&bios))) 2386 while ((bio = bio_list_pop(&bios)))
2346 bio_io_error(bio); 2387 bio_io_error(bio);
2347 return; 2388 return;
2348 } 2389 }
2349 pool->last_commit_jiffies = jiffies; 2390 pool->last_commit_jiffies = jiffies;
2350 2391
2392 while ((bio = bio_list_pop(&bio_completions)))
2393 bio_endio(bio);
2394
2351 while ((bio = bio_list_pop(&bios))) 2395 while ((bio = bio_list_pop(&bios)))
2352 generic_make_request(bio); 2396 generic_make_request(bio);
2353} 2397}
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2954 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); 2998 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2955 spin_lock_init(&pool->lock); 2999 spin_lock_init(&pool->lock);
2956 bio_list_init(&pool->deferred_flush_bios); 3000 bio_list_init(&pool->deferred_flush_bios);
3001 bio_list_init(&pool->deferred_flush_completions);
2957 INIT_LIST_HEAD(&pool->prepared_mappings); 3002 INIT_LIST_HEAD(&pool->prepared_mappings);
2958 INIT_LIST_HEAD(&pool->prepared_discards); 3003 INIT_LIST_HEAD(&pool->prepared_discards);
2959 INIT_LIST_HEAD(&pool->prepared_discards_pt2); 3004 INIT_LIST_HEAD(&pool->prepared_discards_pt2);
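The dm-thin hunk stops ending REQ_FUA overwrite bios directly and instead parks them on the new deferred_flush_completions list, so they only complete after the metadata commit in process_deferred_bios() succeeds. A stripped-down sketch of that batch-then-complete idea follows; the structure and helper names are invented and the locking is reduced to the essentials.

#include <linux/bio.h>
#include <linux/spinlock.h>

struct flush_batch {
	spinlock_t lock;
	struct bio_list pending;	/* bios waiting on the next commit */
};

static void flush_batch_init(struct flush_batch *b)
{
	spin_lock_init(&b->lock);
	bio_list_init(&b->pending);
}

static void defer_until_commit(struct flush_batch *b, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&b->lock, flags);
	bio_list_add(&b->pending, bio);
	spin_unlock_irqrestore(&b->lock, flags);
}

/* Called once the metadata commit has been attempted: fail everything
 * if the commit failed, otherwise signal completion only now. */
static void finish_batch(struct flush_batch *b, int commit_err)
{
	struct bio_list done;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&done);
	spin_lock_irqsave(&b->lock, flags);
	bio_list_merge(&done, &b->pending);
	bio_list_init(&b->pending);
	spin_unlock_irqrestore(&b->lock, flags);

	while ((bio = bio_list_pop(&done))) {
		if (commit_err)
			bio_io_error(bio);
		else
			bio_endio(bio);
	}
}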
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1d54109071cc..fa47249fa3e4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
1863 reschedule_retry(r1_bio); 1863 reschedule_retry(r1_bio);
1864} 1864}
1865 1865
1866static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1867{
1868 sector_t sync_blocks = 0;
1869 sector_t s = r1_bio->sector;
1870 long sectors_to_go = r1_bio->sectors;
1871
1872 /* make sure these bits don't get cleared. */
1873 do {
1874 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1875 s += sync_blocks;
1876 sectors_to_go -= sync_blocks;
1877 } while (sectors_to_go > 0);
1878}
1879
1866static void end_sync_write(struct bio *bio) 1880static void end_sync_write(struct bio *bio)
1867{ 1881{
1868 int uptodate = !bio->bi_status; 1882 int uptodate = !bio->bi_status;
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
1874 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; 1888 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1875 1889
1876 if (!uptodate) { 1890 if (!uptodate) {
1877 sector_t sync_blocks = 0; 1891 abort_sync_write(mddev, r1_bio);
1878 sector_t s = r1_bio->sector;
1879 long sectors_to_go = r1_bio->sectors;
1880 /* make sure these bits doesn't get cleared. */
1881 do {
1882 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1883 s += sync_blocks;
1884 sectors_to_go -= sync_blocks;
1885 } while (sectors_to_go > 0);
1886 set_bit(WriteErrorSeen, &rdev->flags); 1892 set_bit(WriteErrorSeen, &rdev->flags);
1887 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 1893 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1888 set_bit(MD_RECOVERY_NEEDED, & 1894 set_bit(MD_RECOVERY_NEEDED, &
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2172 (i == r1_bio->read_disk || 2178 (i == r1_bio->read_disk ||
2173 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) 2179 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2174 continue; 2180 continue;
2175 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) 2181 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2182 abort_sync_write(mddev, r1_bio);
2176 continue; 2183 continue;
2184 }
2177 2185
2178 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2186 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2179 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) 2187 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index aef1185f383d..14f3fdb8c6bb 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2112,7 +2112,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
2112 if (waiting) 2112 if (waiting)
2113 wake_up(&mq->wait); 2113 wake_up(&mq->wait);
2114 else 2114 else
2115 kblockd_schedule_work(&mq->complete_work); 2115 queue_work(mq->card->complete_wq, &mq->complete_work);
2116 2116
2117 return; 2117 return;
2118 } 2118 }
@@ -2924,6 +2924,13 @@ static int mmc_blk_probe(struct mmc_card *card)
2924 2924
2925 mmc_fixup_device(card, mmc_blk_fixups); 2925 mmc_fixup_device(card, mmc_blk_fixups);
2926 2926
2927 card->complete_wq = alloc_workqueue("mmc_complete",
2928 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2929 if (unlikely(!card->complete_wq)) {
2930 pr_err("Failed to create mmc completion workqueue");
2931 return -ENOMEM;
2932 }
2933
2927 md = mmc_blk_alloc(card); 2934 md = mmc_blk_alloc(card);
2928 if (IS_ERR(md)) 2935 if (IS_ERR(md))
2929 return PTR_ERR(md); 2936 return PTR_ERR(md);
@@ -2987,6 +2994,7 @@ static void mmc_blk_remove(struct mmc_card *card)
2987 pm_runtime_put_noidle(&card->dev); 2994 pm_runtime_put_noidle(&card->dev);
2988 mmc_blk_remove_req(md); 2995 mmc_blk_remove_req(md);
2989 dev_set_drvdata(&card->dev, NULL); 2996 dev_set_drvdata(&card->dev, NULL);
2997 destroy_workqueue(card->complete_wq);
2990} 2998}
2991 2999
2992static int _mmc_blk_suspend(struct mmc_card *card) 3000static int _mmc_blk_suspend(struct mmc_card *card)
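The mmc/core hunk moves request completion off kblockd and onto a per-card workqueue allocated with WQ_MEM_RECLAIM | WQ_HIGHPRI, so completions can still make progress when memory reclaim depends on this device. A compact sketch of that allocate/queue/destroy lifecycle, using a hypothetical device structure:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct workqueue_struct *complete_wq;
	struct work_struct complete_work;
};

static void demo_complete_fn(struct work_struct *work)
{
	/* completion handling would go here */
}

static int demo_probe(struct demo_dev *dev)
{
	/* Dedicated queue: guaranteed a worker even under memory pressure. */
	dev->complete_wq = alloc_workqueue("demo_complete",
					   WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!dev->complete_wq)
		return -ENOMEM;

	INIT_WORK(&dev->complete_work, demo_complete_fn);
	return 0;
}

static void demo_kick_completion(struct demo_dev *dev)
{
	queue_work(dev->complete_wq, &dev->complete_work);
}

static void demo_remove(struct demo_dev *dev)
{
	destroy_workqueue(dev->complete_wq);
}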
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index f19ec60bcbdc..2eba507790e4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1338,7 +1338,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1338 host->regs + SD_EMMC_IRQ_EN); 1338 host->regs + SD_EMMC_IRQ_EN);
1339 1339
1340 ret = request_threaded_irq(host->irq, meson_mmc_irq, 1340 ret = request_threaded_irq(host->irq, meson_mmc_irq,
1341 meson_mmc_irq_thread, IRQF_SHARED, NULL, host); 1341 meson_mmc_irq_thread, IRQF_SHARED,
1342 dev_name(&pdev->dev), host);
1342 if (ret) 1343 if (ret)
1343 goto err_init_clk; 1344 goto err_init_clk;
1344 1345
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 279e326e397e..70fadc976795 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1399,13 +1399,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
1399 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 1399 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1400 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; 1400 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
1401 1401
1402 if (host->cfg->clk_delays || host->use_new_timings) 1402 /*
1403 * Some H5 devices do not have signal traces precise enough to
1404 * use HS DDR mode for their eMMC chips.
1405 *
1406 * We still enable HS DDR modes for all the other controller
1407 * variants that support them.
1408 */
1409 if ((host->cfg->clk_delays || host->use_new_timings) &&
1410 !of_device_is_compatible(pdev->dev.of_node,
1411 "allwinner,sun50i-h5-emmc"))
1403 mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; 1412 mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
1404 1413
1405 ret = mmc_of_parse(mmc); 1414 ret = mmc_of_parse(mmc);
1406 if (ret) 1415 if (ret)
1407 goto error_free_dma; 1416 goto error_free_dma;
1408 1417
1418 /*
1419 * If we don't support delay chains in the SoC, we can't use any
1420 * of the higher speed modes. Mask them out in case the device
1421 * tree specifies the properties for them, which gets added to
1422 * the caps by mmc_of_parse() above.
1423 */
1424 if (!(host->cfg->clk_delays || host->use_new_timings)) {
1425 mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
1426 MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
1427 mmc->caps2 &= ~MMC_CAP2_HS200;
1428 }
1429
1430 /* TODO: This driver doesn't support HS400 mode yet */
1431 mmc->caps2 &= ~MMC_CAP2_HS400;
1432
1409 ret = sunxi_mmc_init_host(host); 1433 ret = sunxi_mmc_init_host(host);
1410 if (ret) 1434 if (ret)
1411 goto error_free_dma; 1435 goto error_free_dma;
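The sunxi-mmc hunk lets mmc_of_parse() collect capabilities from the device tree first and then masks out the DDR, UHS and HS200 bits when the controller variant has no usable delay chains (and HS400 unconditionally, since the driver does not support it). A tiny sketch of that parse-then-constrain ordering; the has_delay_chains flag is an assumption for the example.

#include <linux/mmc/host.h>
#include <linux/types.h>

static int demo_parse_caps(struct mmc_host *mmc, bool has_delay_chains)
{
	int ret;

	ret = mmc_of_parse(mmc);	/* may add DDR/UHS caps from DT */
	if (ret)
		return ret;

	/* The DT may promise more than this controller variant can do;
	 * constrain the caps after parsing rather than before. */
	if (!has_delay_chains) {
		mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
			       MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
		mmc->caps2 &= ~MMC_CAP2_HS200;
	}

	return 0;
}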
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 22f753e555ac..83f88b8b5d9f 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev,
212 * Going to have to check what details I need to set and how to 212 * Going to have to check what details I need to set and how to
213 * get them 213 * get them
214 */ 214 */
215 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node); 215 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
216 mtd->type = MTD_NORFLASH; 216 mtd->type = MTD_NORFLASH;
217 mtd->flags = MTD_WRITEABLE; 217 mtd->flags = MTD_WRITEABLE;
218 mtd->size = size; 218 mtd->size = size;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 999b705769a8..3ef01baef9b6 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -507,6 +507,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
507{ 507{
508 struct nvmem_config config = {}; 508 struct nvmem_config config = {};
509 509
510 config.id = -1;
510 config.dev = &mtd->dev; 511 config.dev = &mtd->dev;
511 config.name = mtd->name; 512 config.name = mtd->name;
512 config.owner = THIS_MODULE; 513 config.owner = THIS_MODULE;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index edb1c023a753..21bf8ac78380 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -197,9 +197,9 @@ config VXLAN
197 197
198config GENEVE 198config GENEVE
199 tristate "Generic Network Virtualization Encapsulation" 199 tristate "Generic Network Virtualization Encapsulation"
200 depends on INET && NET_UDP_TUNNEL 200 depends on INET
201 depends on IPV6 || !IPV6 201 depends on IPV6 || !IPV6
202 select NET_IP_TUNNEL 202 select NET_UDP_TUNNEL
203 select GRO_CELLS 203 select GRO_CELLS
204 ---help--- 204 ---help---
205 This allows one to create geneve virtual interfaces that provide 205 This allows one to create geneve virtual interfaces that provide
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 485462d3087f..537c90c8eb0a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1183 } 1183 }
1184 } 1184 }
1185 1185
1186 /* Link-local multicast packets should be passed to the 1186 /*
1187 * stack on the link they arrive as well as pass them to the 1187 * For packets determined by bond_should_deliver_exact_match() call to
1188 * bond-master device. These packets are mostly usable when 1188 * be suppressed we want to make an exception for link-local packets.
1189 * stack receives it with the link on which they arrive 1189 * This is necessary for e.g. LLDP daemons to be able to monitor
1190 * (e.g. LLDP) they also must be available on master. Some of 1190 * inactive slave links without being forced to bind to them
1191 * the use cases include (but are not limited to): LLDP agents 1191 * explicitly.
1192 * that must be able to operate both on enslaved interfaces as 1192 *
1193 * well as on bonds themselves; linux bridges that must be able 1193 * At the same time, packets that are passed to the bonding master
1194 * to process/pass BPDUs from attached bonds when any kind of 1194 * (including link-local ones) can have their originating interface
1195 * STP version is enabled on the network. 1195 * determined via PACKET_ORIGDEV socket option.
1196 */ 1196 */
1197 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { 1197 if (bond_should_deliver_exact_match(skb, slave, bond)) {
1198 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1198 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
1199 1199 return RX_HANDLER_PASS;
1200 if (nskb) {
1201 nskb->dev = bond->dev;
1202 nskb->queue_mapping = 0;
1203 netif_rx(nskb);
1204 }
1205 return RX_HANDLER_PASS;
1206 }
1207 if (bond_should_deliver_exact_match(skb, slave, bond))
1208 return RX_HANDLER_EXACT; 1200 return RX_HANDLER_EXACT;
1201 }
1209 1202
1210 skb->dev = bond->dev; 1203 skb->dev = bond->dev;
1211 1204
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0e4bbdcc614f..c76892ac4e69 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
344 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 344 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
345} 345}
346 346
347static void b53_enable_vlan(struct b53_device *dev, bool enable) 347static void b53_enable_vlan(struct b53_device *dev, bool enable,
348 bool enable_filtering)
348{ 349{
349 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 350 u8 mgmt, vc0, vc1, vc4 = 0, vc5;
350 351
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
369 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 370 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
370 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; 371 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
371 vc4 &= ~VC4_ING_VID_CHECK_MASK; 372 vc4 &= ~VC4_ING_VID_CHECK_MASK;
372 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 373 if (enable_filtering) {
373 vc5 |= VC5_DROP_VTABLE_MISS; 374 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
375 vc5 |= VC5_DROP_VTABLE_MISS;
376 } else {
377 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
378 vc5 &= ~VC5_DROP_VTABLE_MISS;
379 }
374 380
375 if (is5325(dev)) 381 if (is5325(dev))
376 vc0 &= ~VC0_RESERVED_1; 382 vc0 &= ~VC0_RESERVED_1;
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
420 } 426 }
421 427
422 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 428 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
429
430 dev->vlan_enabled = enable;
431 dev->vlan_filtering_enabled = enable_filtering;
423} 432}
424 433
425static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 434static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev)
632 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 641 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
633} 642}
634 643
644static u16 b53_default_pvid(struct b53_device *dev)
645{
646 if (is5325(dev) || is5365(dev))
647 return 1;
648 else
649 return 0;
650}
651
635int b53_configure_vlan(struct dsa_switch *ds) 652int b53_configure_vlan(struct dsa_switch *ds)
636{ 653{
637 struct b53_device *dev = ds->priv; 654 struct b53_device *dev = ds->priv;
638 struct b53_vlan vl = { 0 }; 655 struct b53_vlan vl = { 0 };
639 int i; 656 int i, def_vid;
657
658 def_vid = b53_default_pvid(dev);
640 659
641 /* clear all vlan entries */ 660 /* clear all vlan entries */
642 if (is5325(dev) || is5365(dev)) { 661 if (is5325(dev) || is5365(dev)) {
643 for (i = 1; i < dev->num_vlans; i++) 662 for (i = def_vid; i < dev->num_vlans; i++)
644 b53_set_vlan_entry(dev, i, &vl); 663 b53_set_vlan_entry(dev, i, &vl);
645 } else { 664 } else {
646 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 665 b53_do_vlan_op(dev, VTA_CMD_CLEAR);
647 } 666 }
648 667
649 b53_enable_vlan(dev, false); 668 b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
650 669
651 b53_for_each_port(dev, i) 670 b53_for_each_port(dev, i)
652 b53_write16(dev, B53_VLAN_PAGE, 671 b53_write16(dev, B53_VLAN_PAGE,
653 B53_VLAN_PORT_DEF_TAG(i), 1); 672 B53_VLAN_PORT_DEF_TAG(i), def_vid);
654 673
655 if (!is5325(dev) && !is5365(dev)) 674 if (!is5325(dev) && !is5365(dev))
656 b53_set_jumbo(dev, dev->enable_jumbo, false); 675 b53_set_jumbo(dev, dev->enable_jumbo, false);
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
1255 1274
1256int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) 1275int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
1257{ 1276{
1277 struct b53_device *dev = ds->priv;
1278 struct net_device *bridge_dev;
1279 unsigned int i;
1280 u16 pvid, new_pvid;
1281
1287	 /* Handle the case where multiple bridges span the same switch device
1283 * and one of them has a different setting than what is being requested
1284 * which would be breaking filtering semantics for any of the other
1285 * bridge devices.
1286 */
1287 b53_for_each_port(dev, i) {
1288 bridge_dev = dsa_to_port(ds, i)->bridge_dev;
1289 if (bridge_dev &&
1290 bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
1291 br_vlan_enabled(bridge_dev) != vlan_filtering) {
1292 netdev_err(bridge_dev,
1293 "VLAN filtering is global to the switch!\n");
1294 return -EINVAL;
1295 }
1296 }
1297
1298 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1299 new_pvid = pvid;
1300 if (dev->vlan_filtering_enabled && !vlan_filtering) {
1301 /* Filtering is currently enabled, use the default PVID since
1302 * the bridge does not expect tagging anymore
1303 */
1304 dev->ports[port].pvid = pvid;
1305 new_pvid = b53_default_pvid(dev);
1306 } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
1307 /* Filtering is currently disabled, restore the previous PVID */
1308 new_pvid = dev->ports[port].pvid;
1309 }
1310
1311 if (pvid != new_pvid)
1312 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1313 new_pvid);
1314
1315 b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
1316
1258 return 0; 1317 return 0;
1259} 1318}
1260EXPORT_SYMBOL(b53_vlan_filtering); 1319EXPORT_SYMBOL(b53_vlan_filtering);
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
1270 if (vlan->vid_end > dev->num_vlans) 1329 if (vlan->vid_end > dev->num_vlans)
1271 return -ERANGE; 1330 return -ERANGE;
1272 1331
1273 b53_enable_vlan(dev, true); 1332 b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
1274 1333
1275 return 0; 1334 return 0;
1276} 1335}
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
1300 b53_fast_age_vlan(dev, vid); 1359 b53_fast_age_vlan(dev, vid);
1301 } 1360 }
1302 1361
1303 if (pvid) { 1362 if (pvid && !dsa_is_cpu_port(ds, port)) {
1304 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1363 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1305 vlan->vid_end); 1364 vlan->vid_end);
1306 b53_fast_age_vlan(dev, vid); 1365 b53_fast_age_vlan(dev, vid);
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
1326 1385
1327 vl->members &= ~BIT(port); 1386 vl->members &= ~BIT(port);
1328 1387
1329 if (pvid == vid) { 1388 if (pvid == vid)
1330 if (is5325(dev) || is5365(dev)) 1389 pvid = b53_default_pvid(dev);
1331 pvid = 1;
1332 else
1333 pvid = 0;
1334 }
1335 1390
1336 if (untagged && !dsa_is_cpu_port(ds, port)) 1391 if (untagged && !dsa_is_cpu_port(ds, port))
1337 vl->untag &= ~(BIT(port)); 1392 vl->untag &= ~(BIT(port));
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
1644 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 1699 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1645 dev->ports[port].vlan_ctl_mask = pvlan; 1700 dev->ports[port].vlan_ctl_mask = pvlan;
1646 1701
1647 if (is5325(dev) || is5365(dev)) 1702 pvid = b53_default_pvid(dev);
1648 pvid = 1;
1649 else
1650 pvid = 0;
1651 1703
1652 /* Make this port join all VLANs without VLAN entries */ 1704 /* Make this port join all VLANs without VLAN entries */
1653 if (is58xx(dev)) { 1705 if (is58xx(dev)) {
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index ec796482792d..4dc7ee38b258 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -91,6 +91,7 @@ enum {
91struct b53_port { 91struct b53_port {
92 u16 vlan_ctl_mask; 92 u16 vlan_ctl_mask;
93 struct ethtool_eee eee; 93 struct ethtool_eee eee;
94 u16 pvid;
94}; 95};
95 96
96struct b53_vlan { 97struct b53_vlan {
@@ -137,6 +138,8 @@ struct b53_device {
137 138
138 unsigned int num_vlans; 139 unsigned int num_vlans;
139 struct b53_vlan *vlans; 140 struct b53_vlan *vlans;
141 bool vlan_enabled;
142 bool vlan_filtering_enabled;
140 unsigned int num_ports; 143 unsigned int num_ports;
141 struct b53_port *ports; 144 struct b53_port *ports;
142}; 145};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 361fbde76654..14138d423cf1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -690,7 +690,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
690 * port, the other ones have already been disabled during 690 * port, the other ones have already been disabled during
691 * bcm_sf2_sw_setup 691 * bcm_sf2_sw_setup
692 */ 692 */
693 for (port = 0; port < DSA_MAX_PORTS; port++) { 693 for (port = 0; port < ds->num_ports; port++) {
694 if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) 694 if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
695 bcm_sf2_port_disable(ds, port, NULL); 695 bcm_sf2_port_disable(ds, port, NULL);
696 } 696 }
@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
726{ 726{
727 struct net_device *p = ds->ports[port].cpu_dp->master; 727 struct net_device *p = ds->ports[port].cpu_dp->master;
728 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 728 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
729 struct ethtool_wolinfo pwol; 729 struct ethtool_wolinfo pwol = { };
730 730
731 /* Get the parent device WoL settings */ 731 /* Get the parent device WoL settings */
732 p->ethtool_ops->get_wol(p, &pwol); 732 if (p->ethtool_ops->get_wol)
733 p->ethtool_ops->get_wol(p, &pwol);
733 734
734 /* Advertise the parent device supported settings */ 735 /* Advertise the parent device supported settings */
735 wol->supported = pwol.supported; 736 wol->supported = pwol.supported;
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
750 struct net_device *p = ds->ports[port].cpu_dp->master; 751 struct net_device *p = ds->ports[port].cpu_dp->master;
751 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 752 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
752 s8 cpu_port = ds->ports[port].cpu_dp->index; 753 s8 cpu_port = ds->ports[port].cpu_dp->index;
753 struct ethtool_wolinfo pwol; 754 struct ethtool_wolinfo pwol = { };
754 755
755 p->ethtool_ops->get_wol(p, &pwol); 756 if (p->ethtool_ops->get_wol)
757 p->ethtool_ops->get_wol(p, &pwol);
756 if (wol->wolopts & ~pwol.supported) 758 if (wol->wolopts & ~pwol.supported)
757 return -EINVAL; 759 return -EINVAL;
758 760
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8dca2c949e73..12fd7ce3f1ff 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
261 unsigned int sub_irq; 261 unsigned int sub_irq;
262 unsigned int n; 262 unsigned int n;
263 u16 reg; 263 u16 reg;
264 u16 ctl1;
264 int err; 265 int err;
265 266
266 mutex_lock(&chip->reg_lock); 267 mutex_lock(&chip->reg_lock);
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
270 if (err) 271 if (err)
271 goto out; 272 goto out;
272 273
273 for (n = 0; n < chip->g1_irq.nirqs; ++n) { 274 do {
274 if (reg & (1 << n)) { 275 for (n = 0; n < chip->g1_irq.nirqs; ++n) {
275 sub_irq = irq_find_mapping(chip->g1_irq.domain, n); 276 if (reg & (1 << n)) {
276 handle_nested_irq(sub_irq); 277 sub_irq = irq_find_mapping(chip->g1_irq.domain,
277 ++nhandled; 278 n);
279 handle_nested_irq(sub_irq);
280 ++nhandled;
281 }
278 } 282 }
279 } 283
284 mutex_lock(&chip->reg_lock);
285 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
286 if (err)
287 goto unlock;
288 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
289unlock:
290 mutex_unlock(&chip->reg_lock);
291 if (err)
292 goto out;
293 ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
294 } while (reg & ctl1);
295
280out: 296out:
281 return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); 297 return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
282} 298}
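The mv88e6xxx hunk re-reads the global status register after servicing it and keeps looping while any enabled source is still asserted, so an interrupt edge that arrives mid-handling is not lost. A reduced sketch of that threaded-handler loop, with the register accessors abstracted behind assumed callbacks:

#include <linux/bitops.h>
#include <linux/irqreturn.h>
#include <linux/types.h>

/* Illustrative chip state; a real driver reads hardware registers. */
struct demo_chip {
	u16 (*read_status)(struct demo_chip *chip);
	u16 (*read_mask)(struct demo_chip *chip);
	void (*handle)(struct demo_chip *chip, unsigned int n);
	unsigned int nirqs;
};

static irqreturn_t demo_irq_thread(struct demo_chip *chip)
{
	unsigned int n, handled = 0;
	u16 status, mask;

	status = chip->read_status(chip);
	do {
		for (n = 0; n < chip->nirqs; n++) {
			if (status & BIT(n)) {
				chip->handle(chip, n);
				handled++;
			}
		}
		/* Sources may have re-asserted while we were handling them;
		 * loop until every enabled source reads back clear. */
		mask = chip->read_mask(chip);
		status = chip->read_status(chip);
	} while (status & mask);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}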
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a70bb1bb90e7..a6eacf2099c3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
2663 goto err_device_destroy; 2663 goto err_device_destroy;
2664 } 2664 }
2665 2665
2666 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2667 /* Make sure we don't have a race with AENQ Links state handler */
2668 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2669 netif_carrier_on(adapter->netdev);
2670
2671 rc = ena_enable_msix_and_set_admin_interrupts(adapter, 2666 rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2672 adapter->num_queues); 2667 adapter->num_queues);
2673 if (rc) { 2668 if (rc) {
@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
2684 } 2679 }
2685 2680
2686 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2681 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2682
2683 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2684 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2685 netif_carrier_on(adapter->netdev);
2686
2687 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 2687 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2688 dev_err(&pdev->dev, 2688 dev_err(&pdev->dev,
2689 "Device reset completed successfully, Driver info: %s\n", 2689 "Device reset completed successfully, Driver info: %s\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index dc8b6173d8d8..63870072cbbd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
45 45
46#define DRV_MODULE_VER_MAJOR 2 46#define DRV_MODULE_VER_MAJOR 2
47#define DRV_MODULE_VER_MINOR 0 47#define DRV_MODULE_VER_MINOR 0
48#define DRV_MODULE_VER_SUBMINOR 2 48#define DRV_MODULE_VER_SUBMINOR 3
49 49
50#define DRV_MODULE_NAME "ena" 50#define DRV_MODULE_NAME "ena"
51#ifndef DRV_MODULE_VERSION 51#ifndef DRV_MODULE_VERSION
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bb41becb6609..31ff1e0d1baa 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1335{ 1335{
1336 struct net_device *netdev; 1336 struct net_device *netdev;
1337 struct atl2_adapter *adapter; 1337 struct atl2_adapter *adapter;
1338 static int cards_found; 1338 static int cards_found = 0;
1339 unsigned long mmio_start; 1339 unsigned long mmio_start;
1340 int mmio_len; 1340 int mmio_len;
1341 int err; 1341 int err;
1342 1342
1343 cards_found = 0;
1344
1345 err = pci_enable_device(pdev); 1343 err = pci_enable_device(pdev);
1346 if (err) 1344 if (err)
1347 return err; 1345 return err;
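
The atl2 change relies on C guaranteeing that a static local is initialized exactly once: initializing cards_found at its definition and dropping the assignment in the body keeps the counter monotonic across successive probe calls, whereas re-zeroing it made every probed card report index 0. A tiny sketch of the difference (function names are made up for the demo):

```c
#include <stdio.h>

/* Correct: the static is initialized once, so the count survives calls. */
static int probe_ok(void)
{
	static int cards_found = 0;
	return cards_found++;
}

/* Broken variant: resetting inside the function gives every card index 0. */
static int probe_broken(void)
{
	static int cards_found;
	cards_found = 0;
	return cards_found++;
}

int main(void)
{
	int a = probe_ok(), b = probe_ok(), c = probe_ok();
	int x = probe_broken(), y = probe_broken(), z = probe_broken();

	printf("ok:     %d %d %d\n", a, b, c);	/* 0 1 2 */
	printf("broken: %d %d %d\n", x, y, z);	/* 0 0 0 */
	return 0;
}
```
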
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 28c9b0bdf2f6..bc3ac369cbe3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev,
134 134
135 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); 135 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
136 reg = rxchk_readl(priv, RXCHK_CONTROL); 136 reg = rxchk_readl(priv, RXCHK_CONTROL);
137 /* Clear L2 header checks, which would prevent BPDUs
138 * from being received.
139 */
140 reg &= ~RXCHK_L2_HDR_DIS;
137 if (priv->rx_chk_en) 141 if (priv->rx_chk_en)
138 reg |= RXCHK_EN; 142 reg |= RXCHK_EN;
139 else 143 else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8bc7e495b027..d95730c6e0f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3903,7 +3903,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3903 if (len) 3903 if (len)
3904 break; 3904 break;
3905 /* on first few passes, just barely sleep */ 3905 /* on first few passes, just barely sleep */
3906 if (i < DFLT_HWRM_CMD_TIMEOUT) 3906 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3907 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 3907 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3908 HWRM_SHORT_MAX_TIMEOUT); 3908 HWRM_SHORT_MAX_TIMEOUT);
3909 else 3909 else
@@ -3926,7 +3926,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3926 dma_rmb(); 3926 dma_rmb();
3927 if (*valid) 3927 if (*valid)
3928 break; 3928 break;
3929 udelay(1); 3929 usleep_range(1, 5);
3930 } 3930 }
3931 3931
3932 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 3932 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a451796deefe..2fb653e0048d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -582,7 +582,7 @@ struct nqe_cn {
582 (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ 582 (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
583 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) 583 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
584 584
585#define HWRM_VALID_BIT_DELAY_USEC 20 585#define HWRM_VALID_BIT_DELAY_USEC 150
586 586
587#define BNXT_HWRM_CHNL_CHIMP 0 587#define BNXT_HWRM_CHNL_CHIMP 0
588#define BNXT_HWRM_CHNL_KONG 1 588#define BNXT_HWRM_CHNL_KONG 1
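
Together the bnxt hunks bound the fast-poll phase with HWRM_SHORT_TIMEOUT_COUNTER, replace the busy udelay(1) with usleep_range(1, 5) while waiting for the response's valid byte, and give that wait more iterations (150 instead of 20). A compact user-space sketch of the same two-phase poll; the counts, sleep lengths and the fake firmware helpers are placeholders, not the driver's values:

```c
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#define SHORT_TIMEOUT_COUNTER	5	/* short sleeps first */
#define SHORT_SLEEP_US		25
#define LONG_SLEEP_US		500
#define VALID_BIT_DELAY		150	/* polls of the "valid" marker */

static bool response_ready(int i) { return i >= 3; }	/* fake firmware */
static bool valid_bit_set(int j)  { return j >= 10; }

int main(void)
{
	int i, j;

	/* Phase 1: wait for the response length to show up. */
	for (i = 0; i < 100; i++) {
		if (response_ready(i))
			break;
		usleep(i < SHORT_TIMEOUT_COUNTER ? SHORT_SLEEP_US : LONG_SLEEP_US);
	}

	/* Phase 2: wait for the last-byte "valid" marker with short sleeps. */
	for (j = 0; j < VALID_BIT_DELAY; j++) {
		if (valid_bit_set(j))
			break;
		usleep(1);
	}

	printf("ready after %d polls, valid after %d polls\n", i, j);
	return 0;
}
```
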
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..62636c1ed141 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
271}; 271};
272 272
273struct nicvf_work { 273struct nicvf_work {
274 struct delayed_work work; 274 struct work_struct work;
275 u8 mode; 275 u8 mode;
276 struct xcast_addr_list *mc; 276 struct xcast_addr_list *mc;
277}; 277};
@@ -327,7 +327,11 @@ struct nicvf {
327 struct nicvf_work rx_mode_work; 327 struct nicvf_work rx_mode_work;
328 /* spinlock to protect workqueue arguments from concurrent access */ 328 /* spinlock to protect workqueue arguments from concurrent access */
329 spinlock_t rx_mode_wq_lock; 329 spinlock_t rx_mode_wq_lock;
330 330 /* workqueue for handling kernel ndo_set_rx_mode() calls */
331 struct workqueue_struct *nicvf_rx_mode_wq;
332 /* mutex to protect VF's mailbox contents from concurrent access */
333 struct mutex rx_mode_mtx;
334 struct delayed_work link_change_work;
331 /* PTP timestamp */ 335 /* PTP timestamp */
332 struct cavium_ptp *ptp_clock; 336 struct cavium_ptp *ptp_clock;
333 /* Inbound timestamping is on */ 337 /* Inbound timestamping is on */
@@ -575,10 +579,8 @@ struct set_ptp {
575 579
576struct xcast { 580struct xcast {
577 u8 msg; 581 u8 msg;
578 union { 582 u8 mode;
579 u8 mode; 583 u64 mac:48;
580 u64 mac;
581 } data;
582}; 584};
583 585
584/* 128 bit shared memory between PF and each VF */ 586/* 128 bit shared memory between PF and each VF */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6c8dcb65ff03..c90252829ed3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -57,14 +57,8 @@ struct nicpf {
57#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) 57#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
58#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) 58#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
59 u8 *vf_lmac_map; 59 u8 *vf_lmac_map;
60 struct delayed_work dwork;
61 struct workqueue_struct *check_link;
62 u8 *link;
63 u8 *duplex;
64 u32 *speed;
65 u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; 60 u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
66 u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; 61 u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
67 bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
68 62
69 /* MSI-X */ 63 /* MSI-X */
70 u8 num_vec; 64 u8 num_vec;
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
929 nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val); 923 nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
930} 924}
931 925
926/* Get BGX LMAC link status and update corresponding VF
927 * if there is a change, valid only if internal L2 switch
928 * is not present otherwise VF link is always treated as up
929 */
930static void nic_link_status_get(struct nicpf *nic, u8 vf)
931{
932 union nic_mbx mbx = {};
933 struct bgx_link_status link;
934 u8 bgx, lmac;
935
936 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
937
938 /* Get BGX, LMAC indices for the VF */
939 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
940 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
941
942 /* Get interface link status */
943 bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
944
945 /* Send a mbox message to VF with current link status */
946 mbx.link_status.link_up = link.link_up;
947 mbx.link_status.duplex = link.duplex;
948 mbx.link_status.speed = link.speed;
949 mbx.link_status.mac_type = link.mac_type;
950
951 /* reply with link status */
952 nic_send_msg_to_vf(nic, vf, &mbx);
953}
954
932/* Interrupt handler to handle mailbox messages from VFs */ 955/* Interrupt handler to handle mailbox messages from VFs */
933static void nic_handle_mbx_intr(struct nicpf *nic, int vf) 956static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
934{ 957{
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
941 int i; 964 int i;
942 int ret = 0; 965 int ret = 0;
943 966
944 nic->mbx_lock[vf] = true;
945
946 mbx_addr = nic_get_mbx_addr(vf); 967 mbx_addr = nic_get_mbx_addr(vf);
947 mbx_data = (u64 *)&mbx; 968 mbx_data = (u64 *)&mbx;
948 969
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
957 switch (mbx.msg.msg) { 978 switch (mbx.msg.msg) {
958 case NIC_MBOX_MSG_READY: 979 case NIC_MBOX_MSG_READY:
959 nic_mbx_send_ready(nic, vf); 980 nic_mbx_send_ready(nic, vf);
960 if (vf < nic->num_vf_en) { 981 return;
961 nic->link[vf] = 0;
962 nic->duplex[vf] = 0;
963 nic->speed[vf] = 0;
964 }
965 goto unlock;
966 case NIC_MBOX_MSG_QS_CFG: 982 case NIC_MBOX_MSG_QS_CFG:
967 reg_addr = NIC_PF_QSET_0_127_CFG | 983 reg_addr = NIC_PF_QSET_0_127_CFG |
968 (mbx.qs.num << NIC_QS_ID_SHIFT); 984 (mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1031 break; 1047 break;
1032 case NIC_MBOX_MSG_RSS_SIZE: 1048 case NIC_MBOX_MSG_RSS_SIZE:
1033 nic_send_rss_size(nic, vf); 1049 nic_send_rss_size(nic, vf);
1034 goto unlock; 1050 return;
1035 case NIC_MBOX_MSG_RSS_CFG: 1051 case NIC_MBOX_MSG_RSS_CFG:
1036 case NIC_MBOX_MSG_RSS_CFG_CONT: 1052 case NIC_MBOX_MSG_RSS_CFG_CONT:
1037 nic_config_rss(nic, &mbx.rss_cfg); 1053 nic_config_rss(nic, &mbx.rss_cfg);
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1039 case NIC_MBOX_MSG_CFG_DONE: 1055 case NIC_MBOX_MSG_CFG_DONE:
1040 /* Last message of VF config msg sequence */ 1056 /* Last message of VF config msg sequence */
1041 nic_enable_vf(nic, vf, true); 1057 nic_enable_vf(nic, vf, true);
1042 goto unlock; 1058 break;
1043 case NIC_MBOX_MSG_SHUTDOWN: 1059 case NIC_MBOX_MSG_SHUTDOWN:
1044 /* First msg in VF teardown sequence */ 1060 /* First msg in VF teardown sequence */
1045 if (vf >= nic->num_vf_en) 1061 if (vf >= nic->num_vf_en)
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1049 break; 1065 break;
1050 case NIC_MBOX_MSG_ALLOC_SQS: 1066 case NIC_MBOX_MSG_ALLOC_SQS:
1051 nic_alloc_sqs(nic, &mbx.sqs_alloc); 1067 nic_alloc_sqs(nic, &mbx.sqs_alloc);
1052 goto unlock; 1068 return;
1053 case NIC_MBOX_MSG_NICVF_PTR: 1069 case NIC_MBOX_MSG_NICVF_PTR:
1054 nic->nicvf[vf] = mbx.nicvf.nicvf; 1070 nic->nicvf[vf] = mbx.nicvf.nicvf;
1055 break; 1071 break;
1056 case NIC_MBOX_MSG_PNICVF_PTR: 1072 case NIC_MBOX_MSG_PNICVF_PTR:
1057 nic_send_pnicvf(nic, vf); 1073 nic_send_pnicvf(nic, vf);
1058 goto unlock; 1074 return;
1059 case NIC_MBOX_MSG_SNICVF_PTR: 1075 case NIC_MBOX_MSG_SNICVF_PTR:
1060 nic_send_snicvf(nic, &mbx.nicvf); 1076 nic_send_snicvf(nic, &mbx.nicvf);
1061 goto unlock; 1077 return;
1062 case NIC_MBOX_MSG_BGX_STATS: 1078 case NIC_MBOX_MSG_BGX_STATS:
1063 nic_get_bgx_stats(nic, &mbx.bgx_stats); 1079 nic_get_bgx_stats(nic, &mbx.bgx_stats);
1064 goto unlock; 1080 return;
1065 case NIC_MBOX_MSG_LOOPBACK: 1081 case NIC_MBOX_MSG_LOOPBACK:
1066 ret = nic_config_loopback(nic, &mbx.lbk); 1082 ret = nic_config_loopback(nic, &mbx.lbk);
1067 break; 1083 break;
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1070 break; 1086 break;
1071 case NIC_MBOX_MSG_PFC: 1087 case NIC_MBOX_MSG_PFC:
1072 nic_pause_frame(nic, vf, &mbx.pfc); 1088 nic_pause_frame(nic, vf, &mbx.pfc);
1073 goto unlock; 1089 return;
1074 case NIC_MBOX_MSG_PTP_CFG: 1090 case NIC_MBOX_MSG_PTP_CFG:
1075 nic_config_timestamp(nic, vf, &mbx.ptp); 1091 nic_config_timestamp(nic, vf, &mbx.ptp);
1076 break; 1092 break;
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1094 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1110 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1095 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1111 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1096 bgx_set_dmac_cam_filter(nic->node, bgx, lmac, 1112 bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
1097 mbx.xcast.data.mac, 1113 mbx.xcast.mac,
1098 vf < NIC_VF_PER_MBX_REG ? vf : 1114 vf < NIC_VF_PER_MBX_REG ? vf :
1099 vf - NIC_VF_PER_MBX_REG); 1115 vf - NIC_VF_PER_MBX_REG);
1100 break; 1116 break;
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1106 } 1122 }
1107 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1123 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1108 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1124 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1109 bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode); 1125 bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
1110 break; 1126 break;
1127 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
1128 if (vf >= nic->num_vf_en) {
1129 ret = -1; /* NACK */
1130 break;
1131 }
1132 nic_link_status_get(nic, vf);
1133 return;
1111 default: 1134 default:
1112 dev_err(&nic->pdev->dev, 1135 dev_err(&nic->pdev->dev,
1113 "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); 1136 "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1121 mbx.msg.msg, vf); 1144 mbx.msg.msg, vf);
1122 nic_mbx_send_nack(nic, vf); 1145 nic_mbx_send_nack(nic, vf);
1123 } 1146 }
1124unlock:
1125 nic->mbx_lock[vf] = false;
1126} 1147}
1127 1148
1128static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) 1149static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
1270 return 0; 1291 return 0;
1271} 1292}
1272 1293
1273/* Poll for BGX LMAC link status and update corresponding VF
1274 * if there is a change, valid only if internal L2 switch
1275 * is not present otherwise VF link is always treated as up
1276 */
1277static void nic_poll_for_link(struct work_struct *work)
1278{
1279 union nic_mbx mbx = {};
1280 struct nicpf *nic;
1281 struct bgx_link_status link;
1282 u8 vf, bgx, lmac;
1283
1284 nic = container_of(work, struct nicpf, dwork.work);
1285
1286 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
1287
1288 for (vf = 0; vf < nic->num_vf_en; vf++) {
1289 /* Poll only if VF is UP */
1290 if (!nic->vf_enabled[vf])
1291 continue;
1292
1293 /* Get BGX, LMAC indices for the VF */
1294 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1295 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1296 /* Get interface link status */
1297 bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
1298
1299 /* Inform VF only if link status changed */
1300 if (nic->link[vf] == link.link_up)
1301 continue;
1302
1303 if (!nic->mbx_lock[vf]) {
1304 nic->link[vf] = link.link_up;
1305 nic->duplex[vf] = link.duplex;
1306 nic->speed[vf] = link.speed;
1307
1308 /* Send a mbox message to VF with current link status */
1309 mbx.link_status.link_up = link.link_up;
1310 mbx.link_status.duplex = link.duplex;
1311 mbx.link_status.speed = link.speed;
1312 mbx.link_status.mac_type = link.mac_type;
1313 nic_send_msg_to_vf(nic, vf, &mbx);
1314 }
1315 }
1316 queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
1317}
1318
1319static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1294static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1320{ 1295{
1321 struct device *dev = &pdev->dev; 1296 struct device *dev = &pdev->dev;
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1384 if (!nic->vf_lmac_map) 1359 if (!nic->vf_lmac_map)
1385 goto err_release_regions; 1360 goto err_release_regions;
1386 1361
1387 nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
1388 if (!nic->link)
1389 goto err_release_regions;
1390
1391 nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
1392 if (!nic->duplex)
1393 goto err_release_regions;
1394
1395 nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
1396 if (!nic->speed)
1397 goto err_release_regions;
1398
1399 /* Initialize hardware */ 1362 /* Initialize hardware */
1400 nic_init_hw(nic); 1363 nic_init_hw(nic);
1401 1364
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1411 if (err) 1374 if (err)
1412 goto err_unregister_interrupts; 1375 goto err_unregister_interrupts;
1413 1376
1414 /* Register a physical link status poll fn() */
1415 nic->check_link = alloc_workqueue("check_link_status",
1416 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1417 if (!nic->check_link) {
1418 err = -ENOMEM;
1419 goto err_disable_sriov;
1420 }
1421
1422 INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
1423 queue_delayed_work(nic->check_link, &nic->dwork, 0);
1424
1425 return 0; 1377 return 0;
1426 1378
1427err_disable_sriov:
1428 if (nic->flags & NIC_SRIOV_ENABLED)
1429 pci_disable_sriov(pdev);
1430err_unregister_interrupts: 1379err_unregister_interrupts:
1431 nic_unregister_interrupts(nic); 1380 nic_unregister_interrupts(nic);
1432err_release_regions: 1381err_release_regions:
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev)
1447 if (nic->flags & NIC_SRIOV_ENABLED) 1396 if (nic->flags & NIC_SRIOV_ENABLED)
1448 pci_disable_sriov(pdev); 1397 pci_disable_sriov(pdev);
1449 1398
1450 if (nic->check_link) {
1451 /* Destroy work Queue */
1452 cancel_delayed_work_sync(&nic->dwork);
1453 destroy_workqueue(nic->check_link);
1454 }
1455
1456 nic_unregister_interrupts(nic); 1399 nic_unregister_interrupts(nic);
1457 pci_release_regions(pdev); 1400 pci_release_regions(pdev);
1458 1401
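
On the thunderx PF side the 2-second link-poll workqueue and the per-VF link/duplex/speed caches are removed; nic_link_status_get() simply reads the BGX/LMAC state and replies whenever a VF asks via NIC_MBOX_MSG_BGX_LINK_CHANGE, and the per-VF mbx_lock flags give way to the mutex the VF now holds around each mailbox exchange. A very small sketch of that pull model, with a fake PHY state instead of the BGX registers and no real mailbox:

```c
/* Pull model: the VF asks, the PF answers with the current link state. */
#include <stdio.h>
#include <stdbool.h>

struct link_status { bool up; int speed; };

static struct link_status bgx_state = { true, 10000 };	/* fake PHY */

/* PF side: answer a BGX_LINK_CHANGE request for one VF. */
static struct link_status pf_link_status_get(int vf)
{
	(void)vf;		/* would map vf -> BGX/LMAC indices */
	return bgx_state;	/* read hardware, reply over the mailbox */
}

/* VF side: periodic work asks the PF instead of the PF polling all VFs. */
int main(void)
{
	struct link_status st = pf_link_status_get(0);

	printf("VF0 link %s, %d Mbps\n", st.up ? "up" : "down", st.speed);
	return 0;
}
```
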
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..503cfadff4ac 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
68MODULE_PARM_DESC(cpi_alg, 68MODULE_PARM_DESC(cpi_alg,
69 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); 69 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
70 70
71/* workqueue for handling kernel ndo_set_rx_mode() calls */
72static struct workqueue_struct *nicvf_rx_mode_wq;
73
74static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) 71static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
75{ 72{
76 if (nic->sqs_mode) 73 if (nic->sqs_mode)
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
127{ 124{
128 int timeout = NIC_MBOX_MSG_TIMEOUT; 125 int timeout = NIC_MBOX_MSG_TIMEOUT;
129 int sleep = 10; 126 int sleep = 10;
127 int ret = 0;
128
129 mutex_lock(&nic->rx_mode_mtx);
130 130
131 nic->pf_acked = false; 131 nic->pf_acked = false;
132 nic->pf_nacked = false; 132 nic->pf_nacked = false;
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
139 netdev_err(nic->netdev, 139 netdev_err(nic->netdev,
140 "PF NACK to mbox msg 0x%02x from VF%d\n", 140 "PF NACK to mbox msg 0x%02x from VF%d\n",
141 (mbx->msg.msg & 0xFF), nic->vf_id); 141 (mbx->msg.msg & 0xFF), nic->vf_id);
142 return -EINVAL; 142 ret = -EINVAL;
143 break;
143 } 144 }
144 msleep(sleep); 145 msleep(sleep);
145 if (nic->pf_acked) 146 if (nic->pf_acked)
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
149 netdev_err(nic->netdev, 150 netdev_err(nic->netdev,
150 "PF didn't ACK to mbox msg 0x%02x from VF%d\n", 151 "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
151 (mbx->msg.msg & 0xFF), nic->vf_id); 152 (mbx->msg.msg & 0xFF), nic->vf_id);
152 return -EBUSY; 153 ret = -EBUSY;
154 break;
153 } 155 }
154 } 156 }
155 return 0; 157 mutex_unlock(&nic->rx_mode_mtx);
158 return ret;
156} 159}
157 160
158/* Checks if VF is able to comminicate with PF 161/* Checks if VF is able to comminicate with PF
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
172 return 1; 175 return 1;
173} 176}
174 177
178static void nicvf_send_cfg_done(struct nicvf *nic)
179{
180 union nic_mbx mbx = {};
181
182 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
183 if (nicvf_send_msg_to_pf(nic, &mbx)) {
184 netdev_err(nic->netdev,
185 "PF didn't respond to CFG DONE msg\n");
186 }
187}
188
175static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) 189static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
176{ 190{
177 if (bgx->rx) 191 if (bgx->rx)
@@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
228 break; 242 break;
229 case NIC_MBOX_MSG_BGX_LINK_CHANGE: 243 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
230 nic->pf_acked = true; 244 nic->pf_acked = true;
231 nic->link_up = mbx.link_status.link_up; 245 if (nic->link_up != mbx.link_status.link_up) {
232 nic->duplex = mbx.link_status.duplex; 246 nic->link_up = mbx.link_status.link_up;
233 nic->speed = mbx.link_status.speed; 247 nic->duplex = mbx.link_status.duplex;
234 nic->mac_type = mbx.link_status.mac_type; 248 nic->speed = mbx.link_status.speed;
235 if (nic->link_up) { 249 nic->mac_type = mbx.link_status.mac_type;
236 netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n", 250 if (nic->link_up) {
237 nic->speed, 251 netdev_info(nic->netdev,
238 nic->duplex == DUPLEX_FULL ? 252 "Link is Up %d Mbps %s duplex\n",
239 "Full" : "Half"); 253 nic->speed,
240 netif_carrier_on(nic->netdev); 254 nic->duplex == DUPLEX_FULL ?
241 netif_tx_start_all_queues(nic->netdev); 255 "Full" : "Half");
242 } else { 256 netif_carrier_on(nic->netdev);
243 netdev_info(nic->netdev, "Link is Down\n"); 257 netif_tx_start_all_queues(nic->netdev);
244 netif_carrier_off(nic->netdev); 258 } else {
245 netif_tx_stop_all_queues(nic->netdev); 259 netdev_info(nic->netdev, "Link is Down\n");
260 netif_carrier_off(nic->netdev);
261 netif_tx_stop_all_queues(nic->netdev);
262 }
246 } 263 }
247 break; 264 break;
248 case NIC_MBOX_MSG_ALLOC_SQS: 265 case NIC_MBOX_MSG_ALLOC_SQS:
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
1311 struct nicvf_cq_poll *cq_poll = NULL; 1328 struct nicvf_cq_poll *cq_poll = NULL;
1312 union nic_mbx mbx = {}; 1329 union nic_mbx mbx = {};
1313 1330
1331 cancel_delayed_work_sync(&nic->link_change_work);
1332
1333 /* wait till all queued set_rx_mode tasks completes */
1334 drain_workqueue(nic->nicvf_rx_mode_wq);
1335
1314 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; 1336 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1315 nicvf_send_msg_to_pf(nic, &mbx); 1337 nicvf_send_msg_to_pf(nic, &mbx);
1316 1338
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1410 return nicvf_send_msg_to_pf(nic, &mbx); 1432 return nicvf_send_msg_to_pf(nic, &mbx);
1411} 1433}
1412 1434
1435static void nicvf_link_status_check_task(struct work_struct *work_arg)
1436{
1437 struct nicvf *nic = container_of(work_arg,
1438 struct nicvf,
1439 link_change_work.work);
1440 union nic_mbx mbx = {};
1441 mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
1442 nicvf_send_msg_to_pf(nic, &mbx);
1443 queue_delayed_work(nic->nicvf_rx_mode_wq,
1444 &nic->link_change_work, 2 * HZ);
1445}
1446
1413int nicvf_open(struct net_device *netdev) 1447int nicvf_open(struct net_device *netdev)
1414{ 1448{
1415 int cpu, err, qidx; 1449 int cpu, err, qidx;
1416 struct nicvf *nic = netdev_priv(netdev); 1450 struct nicvf *nic = netdev_priv(netdev);
1417 struct queue_set *qs = nic->qs; 1451 struct queue_set *qs = nic->qs;
1418 struct nicvf_cq_poll *cq_poll = NULL; 1452 struct nicvf_cq_poll *cq_poll = NULL;
1419 union nic_mbx mbx = {}; 1453
1454 /* wait till all queued set_rx_mode tasks completes if any */
1455 drain_workqueue(nic->nicvf_rx_mode_wq);
1420 1456
1421 netif_carrier_off(netdev); 1457 netif_carrier_off(netdev);
1422 1458
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev)
1512 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1548 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
1513 1549
1514 /* Send VF config done msg to PF */ 1550 /* Send VF config done msg to PF */
1515 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; 1551 nicvf_send_cfg_done(nic);
1516 nicvf_write_to_mbx(nic, &mbx); 1552
1553 INIT_DELAYED_WORK(&nic->link_change_work,
1554 nicvf_link_status_check_task);
1555 queue_delayed_work(nic->nicvf_rx_mode_wq,
1556 &nic->link_change_work, 0);
1517 1557
1518 return 0; 1558 return 0;
1519cleanup: 1559cleanup:
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
1941 1981
1942 /* flush DMAC filters and reset RX mode */ 1982 /* flush DMAC filters and reset RX mode */
1943 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; 1983 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
1944 nicvf_send_msg_to_pf(nic, &mbx); 1984 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
1985 goto free_mc;
1945 1986
1946 if (mode & BGX_XCAST_MCAST_FILTER) { 1987 if (mode & BGX_XCAST_MCAST_FILTER) {
1947 /* once enabling filtering, we need to signal to PF to add 1988 /* once enabling filtering, we need to signal to PF to add
1948 * its' own LMAC to the filter to accept packets for it. 1989 * its' own LMAC to the filter to accept packets for it.
1949 */ 1990 */
1950 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; 1991 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1951 mbx.xcast.data.mac = 0; 1992 mbx.xcast.mac = 0;
1952 nicvf_send_msg_to_pf(nic, &mbx); 1993 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
1994 goto free_mc;
1953 } 1995 }
1954 1996
1955 /* check if we have any specific MACs to be added to PF DMAC filter */ 1997 /* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
1957 /* now go through kernel list of MACs and add them one by one */ 1999 /* now go through kernel list of MACs and add them one by one */
1958 for (idx = 0; idx < mc_addrs->count; idx++) { 2000 for (idx = 0; idx < mc_addrs->count; idx++) {
1959 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; 2001 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1960 mbx.xcast.data.mac = mc_addrs->mc[idx]; 2002 mbx.xcast.mac = mc_addrs->mc[idx];
1961 nicvf_send_msg_to_pf(nic, &mbx); 2003 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
2004 goto free_mc;
1962 } 2005 }
1963 kfree(mc_addrs);
1964 } 2006 }
1965 2007
1966 /* and finally set rx mode for PF accordingly */ 2008 /* and finally set rx mode for PF accordingly */
1967 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; 2009 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
1968 mbx.xcast.data.mode = mode; 2010 mbx.xcast.mode = mode;
1969 2011
1970 nicvf_send_msg_to_pf(nic, &mbx); 2012 nicvf_send_msg_to_pf(nic, &mbx);
2013free_mc:
2014 kfree(mc_addrs);
1971} 2015}
1972 2016
1973static void nicvf_set_rx_mode_task(struct work_struct *work_arg) 2017static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
1974{ 2018{
1975 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, 2019 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
1976 work.work); 2020 work);
1977 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); 2021 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
1978 u8 mode; 2022 u8 mode;
1979 struct xcast_addr_list *mc; 2023 struct xcast_addr_list *mc;
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
2030 kfree(nic->rx_mode_work.mc); 2074 kfree(nic->rx_mode_work.mc);
2031 nic->rx_mode_work.mc = mc_list; 2075 nic->rx_mode_work.mc = mc_list;
2032 nic->rx_mode_work.mode = mode; 2076 nic->rx_mode_work.mode = mode;
2033 queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); 2077 queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
2034 spin_unlock(&nic->rx_mode_wq_lock); 2078 spin_unlock(&nic->rx_mode_wq_lock);
2035} 2079}
2036 2080
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2187 2231
2188 INIT_WORK(&nic->reset_task, nicvf_reset_task); 2232 INIT_WORK(&nic->reset_task, nicvf_reset_task);
2189 2233
2190 INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); 2234 nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
2235 WQ_MEM_RECLAIM,
2236 nic->vf_id);
2237 INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
2191 spin_lock_init(&nic->rx_mode_wq_lock); 2238 spin_lock_init(&nic->rx_mode_wq_lock);
2239 mutex_init(&nic->rx_mode_mtx);
2192 2240
2193 err = register_netdev(netdev); 2241 err = register_netdev(netdev);
2194 if (err) { 2242 if (err) {
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev)
2228 nic = netdev_priv(netdev); 2276 nic = netdev_priv(netdev);
2229 pnetdev = nic->pnicvf->netdev; 2277 pnetdev = nic->pnicvf->netdev;
2230 2278
2231 cancel_delayed_work_sync(&nic->rx_mode_work.work);
2232
2233 /* Check if this Qset is assigned to different VF. 2279 /* Check if this Qset is assigned to different VF.
2234 * If yes, clean primary and all secondary Qsets. 2280 * If yes, clean primary and all secondary Qsets.
2235 */ 2281 */
2236 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) 2282 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
2237 unregister_netdev(pnetdev); 2283 unregister_netdev(pnetdev);
2284 if (nic->nicvf_rx_mode_wq) {
2285 destroy_workqueue(nic->nicvf_rx_mode_wq);
2286 nic->nicvf_rx_mode_wq = NULL;
2287 }
2238 nicvf_unregister_interrupts(nic); 2288 nicvf_unregister_interrupts(nic);
2239 pci_set_drvdata(pdev, NULL); 2289 pci_set_drvdata(pdev, NULL);
2240 if (nic->drv_stats) 2290 if (nic->drv_stats)
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = {
2261static int __init nicvf_init_module(void) 2311static int __init nicvf_init_module(void)
2262{ 2312{
2263 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); 2313 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
2264 nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
2265 WQ_MEM_RECLAIM);
2266 return pci_register_driver(&nicvf_driver); 2314 return pci_register_driver(&nicvf_driver);
2267} 2315}
2268 2316
2269static void __exit nicvf_cleanup_module(void) 2317static void __exit nicvf_cleanup_module(void)
2270{ 2318{
2271 if (nicvf_rx_mode_wq) {
2272 destroy_workqueue(nicvf_rx_mode_wq);
2273 nicvf_rx_mode_wq = NULL;
2274 }
2275 pci_unregister_driver(&nicvf_driver); 2319 pci_unregister_driver(&nicvf_driver);
2276} 2320}
2277 2321
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index e337da6ba2a4..673c57b8023f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
1217 1217
1218 /* Disable MAC steering (NCSI traffic) */ 1218 /* Disable MAC steering (NCSI traffic) */
1219 for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) 1219 for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
1220 bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); 1220 bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
1221} 1221}
1222 1222
1223static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) 1223static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cbdd20b9ee6f..5cbc54e9eb19 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -60,7 +60,7 @@
60#define RX_DMACX_CAM_EN BIT_ULL(48) 60#define RX_DMACX_CAM_EN BIT_ULL(48)
61#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49) 61#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49)
62#define RX_DMAC_COUNT 32 62#define RX_DMAC_COUNT 32
63#define BGX_CMR_RX_STREERING 0x300 63#define BGX_CMR_RX_STEERING 0x300
64#define RX_TRAFFIC_STEER_RULE_COUNT 8 64#define RX_TRAFFIC_STEER_RULE_COUNT 8
65#define BGX_CMR_CHAN_MSK_AND 0x450 65#define BGX_CMR_CHAN_MSK_AND 0x450
66#define BGX_CMR_BIST_STATUS 0x460 66#define BGX_CMR_BIST_STATUS 0x460
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index c041f44324db..b3654598a2d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
660 lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; 660 lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
661 lld->udb_density = 1 << adap->params.sge.eq_qpp; 661 lld->udb_density = 1 << adap->params.sge.eq_qpp;
662 lld->ucq_density = 1 << adap->params.sge.iq_qpp; 662 lld->ucq_density = 1 << adap->params.sge.iq_qpp;
663 lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
663 lld->filt_mode = adap->params.tp.vlan_pri_map; 664 lld->filt_mode = adap->params.tp.vlan_pri_map;
664 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 665 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
665 for (i = 0; i < NCHAN; i++) 666 for (i = 0; i < NCHAN; i++)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 5fa9a2d5fc4b..21da34a4ca24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -336,6 +336,7 @@ struct cxgb4_lld_info {
336 unsigned int cclk_ps; /* Core clock period in psec */ 336 unsigned int cclk_ps; /* Core clock period in psec */
337 unsigned short udb_density; /* # of user DB/page */ 337 unsigned short udb_density; /* # of user DB/page */
338 unsigned short ucq_density; /* # of user CQs/page */ 338 unsigned short ucq_density; /* # of user CQs/page */
339 unsigned int sge_host_page_size; /* SGE host page size */
339 unsigned short filt_mode; /* filter optional components */ 340 unsigned short filt_mode; /* filter optional components */
340 unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */ 341 unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
341 /* scheduler queue */ 342 /* scheduler queue */
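
The new cxgb4 lld field advertises the SGE host page size to upper-layer drivers, computed as 1 << (hps + 10), i.e. hps is an exponent on top of a 1 KiB unit. A one-line worked example, assuming an hps value of 2 purely for illustration:

```c
#include <stdio.h>

int main(void)
{
	unsigned int hps = 2;				/* example encoding */
	unsigned int page_size = 1u << (hps + 10);	/* 1 << 12 = 4096 */

	printf("sge_host_page_size = %u bytes\n", page_size);
	return 0;
}
```
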
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2370dc204202..697c2427f2b7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
2098#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2098#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2099 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2099 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2100 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2100 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2101static __u32 fec_enet_register_version = 2;
2101static u32 fec_enet_register_offset[] = { 2102static u32 fec_enet_register_offset[] = {
2102 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2103 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2103 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2104 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = {
2128 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2129 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2129}; 2130};
2130#else 2131#else
2132static __u32 fec_enet_register_version = 1;
2131static u32 fec_enet_register_offset[] = { 2133static u32 fec_enet_register_offset[] = {
2132 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2134 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2133 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2135 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
@@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
2149 u32 *buf = (u32 *)regbuf; 2151 u32 *buf = (u32 *)regbuf;
2150 u32 i, off; 2152 u32 i, off;
2151 2153
2154 regs->version = fec_enet_register_version;
2155
2152 memset(buf, 0, regs->len); 2156 memset(buf, 0, regs->len);
2153 2157
2154 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { 2158 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 3b9e74be5fbd..ac55db065f16 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
3081 dsaf_dev = dev_get_drvdata(&pdev->dev); 3081 dsaf_dev = dev_get_drvdata(&pdev->dev);
3082 if (!dsaf_dev) { 3082 if (!dsaf_dev) {
3083 dev_err(&pdev->dev, "dsaf_dev is NULL\n"); 3083 dev_err(&pdev->dev, "dsaf_dev is NULL\n");
3084 put_device(&pdev->dev);
3084 return -ENODEV; 3085 return -ENODEV;
3085 } 3086 }
3086 3087
@@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
3088 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { 3089 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
3089 dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", 3090 dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
3090 dsaf_dev->ae_dev.name); 3091 dsaf_dev->ae_dev.name);
3092 put_device(&pdev->dev);
3091 return -ENODEV; 3093 return -ENODEV;
3092 } 3094 }
3093 3095
@@ -3126,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
3126 dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); 3128 dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
3127 dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); 3129 dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
3128 } 3130 }
3131
3132 put_device(&pdev->dev);
3133
3129 return 0; 3134 return 0;
3130} 3135}
3131EXPORT_SYMBOL(hns_dsaf_roce_reset); 3136EXPORT_SYMBOL(hns_dsaf_roce_reset);
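
hns_dsaf_roce_reset() obtains the platform device (which takes a reference) but used to return from its error paths, and from the success path, without dropping it; the hunk adds put_device() on each exit. A small sketch of keeping the get/put pair balanced on every return, using a toy refcount rather than the driver-model helpers:

```c
#include <stdio.h>

struct dev { int refcount; };

static struct dev *get_dev(struct dev *d) { d->refcount++; return d; }
static void put_dev(struct dev *d)        { d->refcount--; }

/* Every return after get_dev() must be paired with put_dev(). */
static int do_reset(struct dev *d, int fail_early)
{
	get_dev(d);

	if (fail_early) {
		put_dev(d);	/* the missing drop the hunk adds */
		return -1;
	}

	/* ... program the hardware ... */
	put_dev(d);
	return 0;
}

int main(void)
{
	struct dev d = { 1 };

	do_reset(&d, 1);
	do_reset(&d, 0);
	printf("refcount back to %d\n", d.refcount);	/* still 1 */
	return 0;
}
```
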
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f52e2c46e6a7..e4ff531db14a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
3289 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : 3289 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3290 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 3290 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3291 if (!ok) { 3291 if (!ok) {
3292 /* Log this in case the user has forgotten to give the kernel
3293 * any buffers, even later in the application.
3294 */
3292 dev_info(&vsi->back->pdev->dev, 3295 dev_info(&vsi->back->pdev->dev,
3293 "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", 3296 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3294 ring->xsk_umem ? "UMEM enabled " : "", 3297 ring->xsk_umem ? "UMEM enabled " : "",
3295 ring->queue_index, pf_q); 3298 ring->queue_index, pf_q);
3296 } 3299 }
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
6725 6728
6726 for (i = 0; i < vsi->num_queue_pairs; i++) { 6729 for (i = 0; i < vsi->num_queue_pairs; i++) {
6727 i40e_clean_tx_ring(vsi->tx_rings[i]); 6730 i40e_clean_tx_ring(vsi->tx_rings[i]);
6728 if (i40e_enabled_xdp_vsi(vsi)) 6731 if (i40e_enabled_xdp_vsi(vsi)) {
6732 /* Make sure that in-progress ndo_xdp_xmit
6733 * calls are completed.
6734 */
6735 synchronize_rcu();
6729 i40e_clean_tx_ring(vsi->xdp_rings[i]); 6736 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6737 }
6730 i40e_clean_rx_ring(vsi->rx_rings[i]); 6738 i40e_clean_rx_ring(vsi->rx_rings[i]);
6731 } 6739 }
6732 6740
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
11895 if (old_prog) 11903 if (old_prog)
11896 bpf_prog_put(old_prog); 11904 bpf_prog_put(old_prog);
11897 11905
11906 /* Kick start the NAPI context if there is an AF_XDP socket open
11907 * on that queue id. This so that receiving will start.
11908 */
11909 if (need_reset && prog)
11910 for (i = 0; i < vsi->num_queue_pairs; i++)
11911 if (vsi->xdp_rings[i]->xsk_umem)
11912 (void)i40e_xsk_async_xmit(vsi->netdev, i);
11913
11898 return 0; 11914 return 0;
11899} 11915}
11900 11916
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
11955static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) 11971static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
11956{ 11972{
11957 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); 11973 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
11958 if (i40e_enabled_xdp_vsi(vsi)) 11974 if (i40e_enabled_xdp_vsi(vsi)) {
11975 /* Make sure that in-progress ndo_xdp_xmit calls are
11976 * completed.
11977 */
11978 synchronize_rcu();
11959 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); 11979 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
11980 }
11960 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); 11981 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
11961} 11982}
11962 11983
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a7e14e98889f..6c97667d20ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3709 struct i40e_netdev_priv *np = netdev_priv(dev); 3709 struct i40e_netdev_priv *np = netdev_priv(dev);
3710 unsigned int queue_index = smp_processor_id(); 3710 unsigned int queue_index = smp_processor_id();
3711 struct i40e_vsi *vsi = np->vsi; 3711 struct i40e_vsi *vsi = np->vsi;
3712 struct i40e_pf *pf = vsi->back;
3712 struct i40e_ring *xdp_ring; 3713 struct i40e_ring *xdp_ring;
3713 int drops = 0; 3714 int drops = 0;
3714 int i; 3715 int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3716 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 3717 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3717 return -ENETDOWN; 3718 return -ENETDOWN;
3718 3719
3719 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) 3720 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
3721 test_bit(__I40E_CONFIG_BUSY, pf->state))
3720 return -ENXIO; 3722 return -ENXIO;
3721 3723
3722 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 3724 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 870cf654e436..3827f16e6923 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
183 err = i40e_queue_pair_enable(vsi, qid); 183 err = i40e_queue_pair_enable(vsi, qid);
184 if (err) 184 if (err)
185 return err; 185 return err;
186
187 /* Kick start the NAPI context so that receiving will start */
188 err = i40e_xsk_async_xmit(vsi->netdev, qid);
189 if (err)
190 return err;
186 } 191 }
187 192
188 return 0; 193 return 0;
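
The i40e hunks wait for in-flight ndo_xdp_xmit callers (which run under RCU) with synchronize_rcu() before cleaning the XDP TX rings, and kick the NAPI context via i40e_xsk_async_xmit() when an AF_XDP socket is already bound to a queue after an XDP program change. The sketch below only approximates the first idea: it replaces RCU with an explicit in-flight counter, so it is a model of the quiescence wait, not the driver's mechanism:

```c
/* Waiting for in-flight transmitters before tearing a ring down,
 * approximated with an atomic counter instead of RCU.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_flight;
static atomic_bool ring_enabled = 1;

static void xdp_xmit(void)
{
	if (!atomic_load(&ring_enabled))
		return;
	atomic_fetch_add(&in_flight, 1);
	/* ... post descriptors ... */
	atomic_fetch_sub(&in_flight, 1);
}

static void ring_teardown(void)
{
	atomic_store(&ring_enabled, 0);
	while (atomic_load(&in_flight))	/* synchronize_rcu() plays this role */
		;			/* wait until transmitters drain */
	/* now safe to clean and free the ring */
}

int main(void)
{
	xdp_xmit();
	ring_teardown();
	printf("ring torn down with %d transmitters in flight\n",
	       atomic_load(&in_flight));
	return 0;
}
```
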
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index daff8183534b..cb35d8202572 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3953 else 3953 else
3954 mrqc = IXGBE_MRQC_VMDQRSS64EN; 3954 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3955 3955
3956 /* Enable L3/L4 for Tx Switched packets */ 3956 /* Enable L3/L4 for Tx Switched packets only for X550,
3957 mrqc |= IXGBE_MRQC_L3L4TXSWEN; 3957 * older devices do not support this feature
3958 */
3959 if (hw->mac.type >= ixgbe_mac_X550)
3960 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3958 } else { 3961 } else {
3959 if (tcs > 4) 3962 if (tcs > 4)
3960 mrqc = IXGBE_MRQC_RTRSS8TCEN; 3963 mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10225 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 10228 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10226 struct ixgbe_adapter *adapter = netdev_priv(dev); 10229 struct ixgbe_adapter *adapter = netdev_priv(dev);
10227 struct bpf_prog *old_prog; 10230 struct bpf_prog *old_prog;
10231 bool need_reset;
10228 10232
10229 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 10233 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10230 return -EINVAL; 10234 return -EINVAL;
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10247 return -ENOMEM; 10251 return -ENOMEM;
10248 10252
10249 old_prog = xchg(&adapter->xdp_prog, prog); 10253 old_prog = xchg(&adapter->xdp_prog, prog);
10254 need_reset = (!!prog != !!old_prog);
10250 10255
10251 /* If transitioning XDP modes reconfigure rings */ 10256 /* If transitioning XDP modes reconfigure rings */
10252 if (!!prog != !!old_prog) { 10257 if (need_reset) {
10253 int err = ixgbe_setup_tc(dev, adapter->hw_tcs); 10258 int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10254 10259
10255 if (err) { 10260 if (err) {
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10265 if (old_prog) 10270 if (old_prog)
10266 bpf_prog_put(old_prog); 10271 bpf_prog_put(old_prog);
10267 10272
10273 /* Kick start the NAPI context if there is an AF_XDP socket open
10274 * on that queue id. This so that receiving will start.
10275 */
10276 if (need_reset && prog)
10277 for (i = 0; i < adapter->num_rx_queues; i++)
10278 if (adapter->xdp_ring[i]->xsk_umem)
10279 (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
10280
10268 return 0; 10281 return 0;
10269} 10282}
10270 10283
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 65c3e2c979d4..36a8879536a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
144 ixgbe_txrx_ring_disable(adapter, qid); 144 ixgbe_txrx_ring_disable(adapter, qid);
145 145
146 err = ixgbe_add_xsk_umem(adapter, umem, qid); 146 err = ixgbe_add_xsk_umem(adapter, umem, qid);
147 if (err)
148 return err;
147 149
148 if (if_running) 150 if (if_running) {
149 ixgbe_txrx_ring_enable(adapter, qid); 151 ixgbe_txrx_ring_enable(adapter, qid);
150 152
151 return err; 153 /* Kick start the NAPI context so that receiving will start */
154 err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
155 if (err)
156 return err;
157 }
158
159 return 0;
152} 160}
153 161
154static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) 162static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
634 dma_addr_t dma; 642 dma_addr_t dma;
635 643
636 while (budget-- > 0) { 644 while (budget-- > 0) {
637 if (unlikely(!ixgbe_desc_unused(xdp_ring))) { 645 if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
646 !netif_carrier_ok(xdp_ring->netdev)) {
638 work_done = false; 647 work_done = false;
639 break; 648 break;
640 } 649 }
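
In ixgbe's zero-copy transmit loop the added netif_carrier_ok() test stops the budgeted loop when the link is down, so AF_XDP descriptors are not pulled off the user-space ring while nothing can be sent. A small sketch of a budgeted loop with both exit conditions; the carrier flag and descriptor count are plain variables standing in for the ring state:

```c
#include <stdio.h>
#include <stdbool.h>

static bool carrier_ok = false;		/* link state */
static int descriptors_unused = 64;	/* room left on the HW ring */

static bool xmit_zc(unsigned int budget)
{
	bool work_done = true;

	while (budget-- > 0) {
		if (!descriptors_unused || !carrier_ok) {
			work_done = false;	/* stop: no room or no link */
			break;
		}
		descriptors_unused--;		/* consume one user frame */
	}
	return work_done;
}

int main(void)
{
	printf("link down: %s\n", xmit_zc(32) ? "done" : "stopped early");
	carrier_ok = true;
	printf("link up:   %s\n", xmit_zc(32) ? "done" : "stopped early");
	return 0;
}
```
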
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2f427271a793..292a668ce88e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2879 2879
2880 ret = mv643xx_eth_shared_of_probe(pdev); 2880 ret = mv643xx_eth_shared_of_probe(pdev);
2881 if (ret) 2881 if (ret)
2882 return ret; 2882 goto err_put_clk;
2883 pd = dev_get_platdata(&pdev->dev); 2883 pd = dev_get_platdata(&pdev->dev);
2884 2884
2885 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 2885 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2887 infer_hw_params(msp); 2887 infer_hw_params(msp);
2888 2888
2889 return 0; 2889 return 0;
2890
2891err_put_clk:
2892 if (!IS_ERR(msp->clk))
2893 clk_disable_unprepare(msp->clk);
2894 return ret;
2890} 2895}
2891 2896
2892static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2897static int mv643xx_eth_shared_remove(struct platform_device *pdev)
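
The mv643xx_eth fix routes the of-probe failure through an err_put_clk label so the clock prepared earlier in the function is released again. A generic sketch of that goto-based unwinding style, with stub clock helpers instead of the clk API:

```c
#include <stdio.h>

static int clk_on;

static int  clk_enable(void)   { clk_on = 1; return 0; }
static void clk_disable(void)  { clk_on = 0; }
static int  of_probe(int fail) { return fail ? -1 : 0; }

static int shared_probe(int fail_of)
{
	int ret;

	ret = clk_enable();
	if (ret)
		return ret;

	ret = of_probe(fail_of);
	if (ret)
		goto err_put_clk;	/* undo what already succeeded */

	return 0;

err_put_clk:
	clk_disable();
	return ret;
}

int main(void)
{
	shared_probe(1);
	printf("clock left %s after failed probe\n", clk_on ? "on" : "off");
	return 0;
}
```
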
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d4568eb2297..8433fb9c3eee 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2146,7 +2146,7 @@ err_drop_frame:
2146 if (unlikely(!skb)) 2146 if (unlikely(!skb))
2147 goto err_drop_frame_ret_pool; 2147 goto err_drop_frame_ret_pool;
2148 2148
2149 dma_sync_single_range_for_cpu(dev->dev.parent, 2149 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2150 rx_desc->buf_phys_addr, 2150 rx_desc->buf_phys_addr,
2151 MVNETA_MH_SIZE + NET_SKB_PAD, 2151 MVNETA_MH_SIZE + NET_SKB_PAD,
2152 rx_bytes, 2152 rx_bytes,
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f3a5fa84860f..57727fe1501e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5073 INIT_WORK(&hw->restart_work, sky2_restart); 5073 INIT_WORK(&hw->restart_work, sky2_restart);
5074 5074
5075 pci_set_drvdata(pdev, hw); 5075 pci_set_drvdata(pdev, hw);
5076 pdev->d3_delay = 200; 5076 pdev->d3_delay = 300;
5077 5077
5078 return 0; 5078 return 0;
5079 5079
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6b88881b8e35..c1438ae52a11 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3360 dev->addr_len = ETH_ALEN; 3360 dev->addr_len = ETH_ALEN;
3361 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); 3361 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
3362 if (!is_valid_ether_addr(dev->dev_addr)) { 3362 if (!is_valid_ether_addr(dev->dev_addr)) {
3363 en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", 3363 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
3364 priv->port, dev->dev_addr); 3364 priv->port, dev->dev_addr);
3365 err = -EINVAL; 3365 err = -EINVAL;
3366 goto out; 3366 goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9a0881cb7f51..6c01314e87b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
617} 617}
618#endif 618#endif
619 619
620#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
621
620/* We reach this function only after checking that any of 622/* We reach this function only after checking that any of
621 * the (IPv4 | IPv6) bits are set in cqe->status. 623 * the (IPv4 | IPv6) bits are set in cqe->status.
622 */ 624 */
@@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
624 netdev_features_t dev_features) 626 netdev_features_t dev_features)
625{ 627{
626 __wsum hw_checksum = 0; 628 __wsum hw_checksum = 0;
629 void *hdr;
630
631 /* CQE csum doesn't cover padding octets in short ethernet
632 * frames. And the pad field is appended prior to calculating
633 * and appending the FCS field.
634 *
635 * Detecting these padded frames requires to verify and parse
636 * IP headers, so we simply force all those small frames to skip
637 * checksum complete.
638 */
639 if (short_frame(skb->len))
640 return -EINVAL;
627 641
628 void *hdr = (u8 *)va + sizeof(struct ethhdr); 642 hdr = (u8 *)va + sizeof(struct ethhdr);
629
630 hw_checksum = csum_unfold((__force __sum16)cqe->checksum); 643 hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
631 644
632 if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && 645 if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
@@ -819,6 +832,11 @@ xdp_drop_no_cnt:
819 skb_record_rx_queue(skb, cq_ring); 832 skb_record_rx_queue(skb, cq_ring);
820 833
821 if (likely(dev->features & NETIF_F_RXCSUM)) { 834 if (likely(dev->features & NETIF_F_RXCSUM)) {
835 /* TODO: For IP non TCP/UDP packets when csum complete is
836 * not an option (not supported or any other reason) we can
837 * actually check cqe IPOK status bit and report
838 * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
839 */
822 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | 840 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
823 MLX4_CQE_STATUS_UDP)) && 841 MLX4_CQE_STATUS_UDP)) &&
824 (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && 842 (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
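
Frames no longer than ETH_ZLEN + ETH_FCS_LEN may carry padding that the mlx4 CQE checksum does not cover, so check_csum() now returns early for them and the RX path falls back from CHECKSUM_COMPLETE. A worked example of the threshold, assuming the length is compared exactly as in the hunk:

```c
#include <stdio.h>
#include <stdbool.h>

#define ETH_ZLEN	60	/* minimum Ethernet frame, without FCS */
#define ETH_FCS_LEN	4

static bool short_frame(unsigned int len)
{
	return len <= ETH_ZLEN + ETH_FCS_LEN;	/* may contain pad octets */
}

int main(void)
{
	unsigned int lens[] = { 42, 64, 65, 1514 };

	for (unsigned int i = 0; i < 4; i++)
		printf("len %4u -> %s\n", lens[i],
		       short_frame(lens[i]) ? "skip csum complete"
					     : "csum complete ok");
	return 0;
}
```
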
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3e0fa8a8077b..e267ff93e8a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1583,6 +1583,24 @@ no_trig:
1583 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); 1583 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1584} 1584}
1585 1585
1586void mlx5_cmd_flush(struct mlx5_core_dev *dev)
1587{
1588 struct mlx5_cmd *cmd = &dev->cmd;
1589 int i;
1590
1591 for (i = 0; i < cmd->max_reg_cmds; i++)
1592 while (down_trylock(&cmd->sem))
1593 mlx5_cmd_trigger_completions(dev);
1594
1595 while (down_trylock(&cmd->pages_sem))
1596 mlx5_cmd_trigger_completions(dev);
1597
1598 /* Unlock cmdif */
1599 up(&cmd->pages_sem);
1600 for (i = 0; i < cmd->max_reg_cmds; i++)
1601 up(&cmd->sem);
1602}
1603
1586static int status_to_err(u8 status) 1604static int status_to_err(u8 status)
1587{ 1605{
1588 return status ? -1 : 0; /* TBD more meaningful codes */ 1606 return status ? -1 : 0; /* TBD more meaningful codes */
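
mlx5_cmd_flush() acquires every command-interface semaphore slot with down_trylock(), triggering completions until each acquisition succeeds, which forces stuck commands to finish while the interface is locked, then releases the slots. A user-space approximation with a POSIX semaphore; the completion trigger is a stub, the slot count is arbitrary, and older glibc may need -pthread to link:

```c
#include <semaphore.h>
#include <stdio.h>

#define MAX_REG_CMDS 4

static sem_t cmd_sem;

static void trigger_completions(void) { /* would complete stuck commands */ }

static void cmd_flush(void)
{
	int i;

	/* Take every slot; while a slot is busy, push completions along. */
	for (i = 0; i < MAX_REG_CMDS; i++)
		while (sem_trywait(&cmd_sem))
			trigger_completions();

	/* All slots held: no command can be in flight. Release them. */
	for (i = 0; i < MAX_REG_CMDS; i++)
		sem_post(&cmd_sem);
}

int main(void)
{
	sem_init(&cmd_sem, 0, MAX_REG_CMDS);
	cmd_flush();
	printf("command interface drained and unlocked\n");
	sem_destroy(&cmd_sem);
	return 0;
}
```
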
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8fa8fdd30b85..448a92561567 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -657,6 +657,7 @@ struct mlx5e_channel_stats {
657enum { 657enum {
658 MLX5E_STATE_OPENED, 658 MLX5E_STATE_OPENED,
659 MLX5E_STATE_DESTROYING, 659 MLX5E_STATE_DESTROYING,
660 MLX5E_STATE_XDP_TX_ENABLED,
660}; 661};
661 662
662struct mlx5e_rqt { 663struct mlx5e_rqt {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 3740177eed09..03b2a9f9c589 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
365 int sq_num; 365 int sq_num;
366 int i; 366 int i;
367 367
368 if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) 368 /* this flag is sufficient, no need to test internal sq state */
369 if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
369 return -ENETDOWN; 370 return -ENETDOWN;
370 371
371 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 372 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
378 379
379 sq = &priv->channels.c[sq_num]->xdpsq; 380 sq = &priv->channels.c[sq_num]->xdpsq;
380 381
381 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
382 return -ENETDOWN;
383
384 for (i = 0; i < n; i++) { 382 for (i = 0; i < n; i++) {
385 struct xdp_frame *xdpf = frames[i]; 383 struct xdp_frame *xdpf = frames[i];
386 struct mlx5e_xdp_info xdpi; 384 struct mlx5e_xdp_info xdpi;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 3a67cb3cd179..ee27a7c8cd87 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
50int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, 50int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
51 u32 flags); 51 u32 flags);
52 52
53static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
54{
55 set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
56}
57
58static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
59{
60 clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
61 /* let other device's napi(s) see our new state */
62 synchronize_rcu();
63}
64
65static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
66{
67 return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
68}
69
53static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) 70static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
54{ 71{
55 if (sq->doorbell_cseg) { 72 if (sq->doorbell_cseg) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3bbccead2f63..47233b9a4f81 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -354,9 +354,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
354 354
355 new_channels.params = priv->channels.params; 355 new_channels.params = priv->channels.params;
356 new_channels.params.num_channels = count; 356 new_channels.params.num_channels = count;
357 if (!netif_is_rxfh_configured(priv->netdev))
358 mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
359 MLX5E_INDIR_RQT_SIZE, count);
360 357
361 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 358 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
362 priv->channels.params = new_channels.params; 359 priv->channels.params = new_channels.params;
@@ -372,6 +369,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
372 if (arfs_enabled) 369 if (arfs_enabled)
373 mlx5e_arfs_disable(priv); 370 mlx5e_arfs_disable(priv);
374 371
372 if (!netif_is_rxfh_configured(priv->netdev))
373 mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
374 MLX5E_INDIR_RQT_SIZE, count);
375
375 /* Switch to new channels, set new parameters and close old ones */ 376 /* Switch to new channels, set new parameters and close old ones */
376 mlx5e_switch_priv_channels(priv, &new_channels, NULL); 377 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
377 378
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 01819e5c9975..93e50ccd44c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2938,6 +2938,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2938 2938
2939 mlx5e_build_tx2sq_maps(priv); 2939 mlx5e_build_tx2sq_maps(priv);
2940 mlx5e_activate_channels(&priv->channels); 2940 mlx5e_activate_channels(&priv->channels);
2941 mlx5e_xdp_tx_enable(priv);
2941 netif_tx_start_all_queues(priv->netdev); 2942 netif_tx_start_all_queues(priv->netdev);
2942 2943
2943 if (mlx5e_is_vport_rep(priv)) 2944 if (mlx5e_is_vport_rep(priv))
@@ -2959,6 +2960,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2959 */ 2960 */
2960 netif_tx_stop_all_queues(priv->netdev); 2961 netif_tx_stop_all_queues(priv->netdev);
2961 netif_tx_disable(priv->netdev); 2962 netif_tx_disable(priv->netdev);
2963 mlx5e_xdp_tx_disable(priv);
2962 mlx5e_deactivate_channels(&priv->channels); 2964 mlx5e_deactivate_channels(&priv->channels);
2963} 2965}
2964 2966
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index fbc42b7252a9..503035469d2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -211,11 +211,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
211 enum port_module_event_status_type module_status; 211 enum port_module_event_status_type module_status;
212 enum port_module_event_error_type error_type; 212 enum port_module_event_error_type error_type;
213 struct mlx5_eqe_port_module *module_event_eqe; 213 struct mlx5_eqe_port_module *module_event_eqe;
214 const char *status_str, *error_str; 214 const char *status_str;
215 u8 module_num; 215 u8 module_num;
216 216
217 module_event_eqe = &eqe->data.port_module; 217 module_event_eqe = &eqe->data.port_module;
218 module_num = module_event_eqe->module;
219 module_status = module_event_eqe->module_status & 218 module_status = module_event_eqe->module_status &
220 PORT_MODULE_EVENT_MODULE_STATUS_MASK; 219 PORT_MODULE_EVENT_MODULE_STATUS_MASK;
221 error_type = module_event_eqe->error_type & 220 error_type = module_event_eqe->error_type &
@@ -223,25 +222,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
223 222
224 if (module_status < MLX5_MODULE_STATUS_NUM) 223 if (module_status < MLX5_MODULE_STATUS_NUM)
225 events->pme_stats.status_counters[module_status]++; 224 events->pme_stats.status_counters[module_status]++;
226 status_str = mlx5_pme_status_to_string(module_status);
227 225
228 if (module_status == MLX5_MODULE_STATUS_ERROR) { 226 if (module_status == MLX5_MODULE_STATUS_ERROR)
229 if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) 227 if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
230 events->pme_stats.error_counters[error_type]++; 228 events->pme_stats.error_counters[error_type]++;
231 error_str = mlx5_pme_error_to_string(error_type);
232 }
233 229
234 if (!printk_ratelimit()) 230 if (!printk_ratelimit())
235 return NOTIFY_OK; 231 return NOTIFY_OK;
236 232
237 if (module_status == MLX5_MODULE_STATUS_ERROR) 233 module_num = module_event_eqe->module;
234 status_str = mlx5_pme_status_to_string(module_status);
235 if (module_status == MLX5_MODULE_STATUS_ERROR) {
236 const char *error_str = mlx5_pme_error_to_string(error_type);
237
238 mlx5_core_err(events->dev, 238 mlx5_core_err(events->dev,
239 "Port module event[error]: module %u, %s, %s\n", 239 "Port module event[error]: module %u, %s, %s\n",
240 module_num, status_str, error_str); 240 module_num, status_str, error_str);
241 else 241 } else {
242 mlx5_core_info(events->dev, 242 mlx5_core_info(events->dev,
243 "Port module event: module %u, %s\n", 243 "Port module event: module %u, %s\n",
244 module_num, status_str); 244 module_num, status_str);
245 }
245 246
246 return NOTIFY_OK; 247 return NOTIFY_OK;
247} 248}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 196c07383082..cb9fa3430c53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
103 mlx5_core_err(dev, "start\n"); 103 mlx5_core_err(dev, "start\n");
104 if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { 104 if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
105 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 105 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
106 mlx5_cmd_trigger_completions(dev); 106 mlx5_cmd_flush(dev);
107 } 107 }
108 108
109 mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); 109 mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 5300b0b6d836..4fdac020b795 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -126,6 +126,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
126 struct ptp_system_timestamp *sts); 126 struct ptp_system_timestamp *sts);
127 127
128void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); 128void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
129void mlx5_cmd_flush(struct mlx5_core_dev *dev);
129int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); 130int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
130void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); 131void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
131 132
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 32519c93df17..b65e274b02e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
862 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 862 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
863 bool configure = false; 863 bool configure = false;
864 bool pfc = false; 864 bool pfc = false;
865 u16 thres_cells;
866 u16 delay_cells;
865 bool lossy; 867 bool lossy;
866 u16 thres;
867 868
868 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 869 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
869 if (prio_tc[j] == i) { 870 if (prio_tc[j] == i) {
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
877 continue; 878 continue;
878 879
879 lossy = !(pfc || pause_en); 880 lossy = !(pfc || pause_en);
880 thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 881 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
881 delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, 882 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
882 pause_en); 883 pfc, pause_en);
883 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); 884 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
885 thres_cells, lossy);
884 } 886 }
885 887
886 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 888 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e23ca90289f7..0a868c829b90 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1291 1291
1292static int 1292static int
1293wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1293wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1294 enum alu_op alu_op, bool skip) 1294 enum alu_op alu_op)
1295{ 1295{
1296 const struct bpf_insn *insn = &meta->insn; 1296 const struct bpf_insn *insn = &meta->insn;
1297 1297
1298 if (skip) {
1299 meta->skip = true;
1300 return 0;
1301 }
1302
1303 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); 1298 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
1304 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1299 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
1305 1300
@@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2309 2304
2310static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2305static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2311{ 2306{
2312 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); 2307 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
2313} 2308}
2314 2309
2315static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2310static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2319 2314
2320static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2315static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2321{ 2316{
2322 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 2317 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
2323} 2318}
2324 2319
2325static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2320static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2329 2324
2330static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2325static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2331{ 2326{
2332 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 2327 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
2333} 2328}
2334 2329
2335static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2330static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2339 2334
2340static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2335static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2341{ 2336{
2342 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); 2337 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
2343} 2338}
2344 2339
2345static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2340static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2349 2344
2350static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2345static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2351{ 2346{
2352 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); 2347 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
2353} 2348}
2354 2349
2355static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2350static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index beb8e5d6401a..ded556b7bab5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1688 1688
1689 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); 1689 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1690 1690
1691 if (!ether_addr_equal(ethh->h_dest,
1692 p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1693 DP_VERBOSE(p_hwfn,
1694 QED_MSG_RDMA,
1695 "Got unexpected mac %pM instead of %pM\n",
1696 ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
1697 return -EINVAL;
1698 }
1699
1691 ether_addr_copy(remote_mac_addr, ethh->h_source); 1700 ether_addr_copy(remote_mac_addr, ethh->h_source);
1692 ether_addr_copy(local_mac_addr, ethh->h_dest); 1701 ether_addr_copy(local_mac_addr, ethh->h_dest);
1693 1702
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2605 struct qed_iwarp_info *iwarp_info; 2614 struct qed_iwarp_info *iwarp_info;
2606 struct qed_ll2_acquire_data data; 2615 struct qed_ll2_acquire_data data;
2607 struct qed_ll2_cbs cbs; 2616 struct qed_ll2_cbs cbs;
2608 u32 mpa_buff_size; 2617 u32 buff_size;
2609 u16 n_ooo_bufs; 2618 u16 n_ooo_bufs;
2610 int rc = 0; 2619 int rc = 0;
2611 int i; 2620 int i;
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2632 2641
2633 memset(&data, 0, sizeof(data)); 2642 memset(&data, 0, sizeof(data));
2634 data.input.conn_type = QED_LL2_TYPE_IWARP; 2643 data.input.conn_type = QED_LL2_TYPE_IWARP;
2635 data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; 2644 data.input.mtu = params->max_mtu;
2636 data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; 2645 data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2637 data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; 2646 data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2638 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ 2647 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2654 goto err; 2663 goto err;
2655 } 2664 }
2656 2665
2666 buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2657 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, 2667 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2658 QED_IWARP_LL2_SYN_RX_SIZE, 2668 QED_IWARP_LL2_SYN_RX_SIZE,
2659 QED_IWARP_MAX_SYN_PKT_SIZE, 2669 buff_size,
2660 iwarp_info->ll2_syn_handle); 2670 iwarp_info->ll2_syn_handle);
2661 if (rc) 2671 if (rc)
2662 goto err; 2672 goto err;
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2710 if (rc) 2720 if (rc)
2711 goto err; 2721 goto err;
2712 2722
2713 mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2714 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, 2723 rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2715 data.input.rx_num_desc, 2724 data.input.rx_num_desc,
2716 mpa_buff_size, 2725 buff_size,
2717 iwarp_info->ll2_mpa_handle); 2726 iwarp_info->ll2_mpa_handle);
2718 if (rc) 2727 if (rc)
2719 goto err; 2728 goto err;
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2726 2735
2727 iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; 2736 iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2728 2737
2729 iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); 2738 iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
2730 if (!iwarp_info->mpa_intermediate_buf) 2739 if (!iwarp_info->mpa_intermediate_buf)
2731 goto err; 2740 goto err;
2732 2741
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index b8f612d00241..7ac959038324 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
46 46
47#define QED_IWARP_LL2_SYN_TX_SIZE (128) 47#define QED_IWARP_LL2_SYN_TX_SIZE (128)
48#define QED_IWARP_LL2_SYN_RX_SIZE (256) 48#define QED_IWARP_LL2_SYN_RX_SIZE (256)
49#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
50 49
51#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) 50#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256)
52#define QED_IWARP_MAX_OOO (16) 51#define QED_IWARP_MAX_OOO (16)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index abb94c543aa2..6e36b88ca7c9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1286,11 +1286,13 @@ static u16 rtl_get_events(struct rtl8169_private *tp)
1286static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) 1286static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1287{ 1287{
1288 RTL_W16(tp, IntrStatus, bits); 1288 RTL_W16(tp, IntrStatus, bits);
1289 mmiowb();
1289} 1290}
1290 1291
1291static void rtl_irq_disable(struct rtl8169_private *tp) 1292static void rtl_irq_disable(struct rtl8169_private *tp)
1292{ 1293{
1293 RTL_W16(tp, IntrMask, 0); 1294 RTL_W16(tp, IntrMask, 0);
1295 mmiowb();
1294} 1296}
1295 1297
1296#define RTL_EVENT_NAPI_RX (RxOK | RxErr) 1298#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
@@ -6072,7 +6074,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6072 struct device *d = tp_to_dev(tp); 6074 struct device *d = tp_to_dev(tp);
6073 dma_addr_t mapping; 6075 dma_addr_t mapping;
6074 u32 opts[2], len; 6076 u32 opts[2], len;
6075 bool stop_queue;
6076 int frags; 6077 int frags;
6077 6078
6078 if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { 6079 if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -6114,6 +6115,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6114 6115
6115 txd->opts2 = cpu_to_le32(opts[1]); 6116 txd->opts2 = cpu_to_le32(opts[1]);
6116 6117
6118 netdev_sent_queue(dev, skb->len);
6119
6117 skb_tx_timestamp(skb); 6120 skb_tx_timestamp(skb);
6118 6121
6119 /* Force memory writes to complete before releasing descriptor */ 6122 /* Force memory writes to complete before releasing descriptor */
@@ -6126,14 +6129,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6126 6129
6127 tp->cur_tx += frags + 1; 6130 tp->cur_tx += frags + 1;
6128 6131
6129 stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); 6132 RTL_W8(tp, TxPoll, NPQ);
6130 if (unlikely(stop_queue))
6131 netif_stop_queue(dev);
6132 6133
6133 if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) 6134 mmiowb();
6134 RTL_W8(tp, TxPoll, NPQ);
6135 6135
6136 if (unlikely(stop_queue)) { 6136 if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
6137 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
6138 * not miss a ring update when it notices a stopped queue.
6139 */
6140 smp_wmb();
6141 netif_stop_queue(dev);
6137 /* Sync with rtl_tx: 6142 /* Sync with rtl_tx:
6138 * - publish queue status and cur_tx ring index (write barrier) 6143 * - publish queue status and cur_tx ring index (write barrier)
6139 * - refresh dirty_tx ring index (read barrier). 6144 * - refresh dirty_tx ring index (read barrier).
@@ -6483,7 +6488,9 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
6483 6488
6484 if (work_done < budget) { 6489 if (work_done < budget) {
6485 napi_complete_done(napi, work_done); 6490 napi_complete_done(napi, work_done);
6491
6486 rtl_irq_enable(tp); 6492 rtl_irq_enable(tp);
6493 mmiowb();
6487 } 6494 }
6488 6495
6489 return work_done; 6496 return work_done;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 2f2bda68d861..c08034154a9a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6115,7 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6115static int efx_ef10_mtd_probe(struct efx_nic *efx) 6115static int efx_ef10_mtd_probe(struct efx_nic *efx)
6116{ 6116{
6117 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); 6117 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
6118 DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); 6118 DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
6119 struct efx_mcdi_mtd_partition *parts; 6119 struct efx_mcdi_mtd_partition *parts;
6120 size_t outlen, n_parts_total, i, n_parts; 6120 size_t outlen, n_parts_total, i, n_parts;
6121 unsigned int type; 6121 unsigned int type;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 20299f6f65fc..736e29635b77 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
241static int dwmac4_rx_check_timestamp(void *desc) 241static int dwmac4_rx_check_timestamp(void *desc)
242{ 242{
243 struct dma_desc *p = (struct dma_desc *)desc; 243 struct dma_desc *p = (struct dma_desc *)desc;
244 unsigned int rdes0 = le32_to_cpu(p->des0);
245 unsigned int rdes1 = le32_to_cpu(p->des1);
246 unsigned int rdes3 = le32_to_cpu(p->des3);
244 u32 own, ctxt; 247 u32 own, ctxt;
245 int ret = 1; 248 int ret = 1;
246 249
247 own = p->des3 & RDES3_OWN; 250 own = rdes3 & RDES3_OWN;
248 ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) 251 ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
249 >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); 252 >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
250 253
251 if (likely(!own && ctxt)) { 254 if (likely(!own && ctxt)) {
252 if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) 255 if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
253 /* Corrupted value */ 256 /* Corrupted value */
254 ret = -EINVAL; 257 ret = -EINVAL;
255 else 258 else
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5d85742a2be0..3c749c327cbd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -696,25 +696,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
696 struct ethtool_eee *edata) 696 struct ethtool_eee *edata)
697{ 697{
698 struct stmmac_priv *priv = netdev_priv(dev); 698 struct stmmac_priv *priv = netdev_priv(dev);
699 int ret;
699 700
700 priv->eee_enabled = edata->eee_enabled; 701 if (!edata->eee_enabled) {
701
702 if (!priv->eee_enabled)
703 stmmac_disable_eee_mode(priv); 702 stmmac_disable_eee_mode(priv);
704 else { 703 } else {
705 /* We are asking for enabling the EEE but it is safe 704 /* We are asking for enabling the EEE but it is safe
706 * to verify all by invoking the eee_init function. 705 * to verify all by invoking the eee_init function.
707 * In case of failure it will return an error. 706 * In case of failure it will return an error.
708 */ 707 */
709 priv->eee_enabled = stmmac_eee_init(priv); 708 edata->eee_enabled = stmmac_eee_init(priv);
710 if (!priv->eee_enabled) 709 if (!edata->eee_enabled)
711 return -EOPNOTSUPP; 710 return -EOPNOTSUPP;
712
713 /* Do not change tx_lpi_timer in case of failure */
714 priv->tx_lpi_timer = edata->tx_lpi_timer;
715 } 711 }
716 712
717 return phy_ethtool_set_eee(dev->phydev, edata); 713 ret = phy_ethtool_set_eee(dev->phydev, edata);
714 if (ret)
715 return ret;
716
717 priv->eee_enabled = edata->eee_enabled;
718 priv->tx_lpi_timer = edata->tx_lpi_timer;
719 return 0;
718} 720}
719 721
720static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) 722static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1f612268c998..d847f672a705 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
259 const char *name; 259 const char *name;
260 char node_name[32]; 260 char node_name[32];
261 261
262 if (of_property_read_string(node, "label", &name) < 0) { 262 if (of_property_read_string(child, "label", &name) < 0) {
263 snprintf(node_name, sizeof(node_name), "%pOFn", child); 263 snprintf(node_name, sizeof(node_name), "%pOFn", child);
264 name = node_name; 264 name = node_name;
265 } 265 }
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 7cdac77d0c68..07e41c42bcf5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev,
499 499
500 if (!data) 500 if (!data)
501 return 0; 501 return 0;
502 if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
503 return -EPERM;
502 504
503 if (data[IFLA_IPVLAN_MODE]) { 505 if (data[IFLA_IPVLAN_MODE]) {
504 u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); 506 u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
601 struct ipvl_dev *tmp = netdev_priv(phy_dev); 603 struct ipvl_dev *tmp = netdev_priv(phy_dev);
602 604
603 phy_dev = tmp->phy_dev; 605 phy_dev = tmp->phy_dev;
606 if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
607 return -EPERM;
604 } else if (!netif_is_ipvlan_port(phy_dev)) { 608 } else if (!netif_is_ipvlan_port(phy_dev)) {
605 /* Exit early if the underlying link is invalid or busy */ 609 /* Exit early if the underlying link is invalid or busy */
606 if (phy_dev->type != ARPHRD_ETHER || 610 if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 82ab6ed3b74e..6bac602094bd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -26,6 +26,8 @@
26#include <linux/marvell_phy.h> 26#include <linux/marvell_phy.h>
27#include <linux/phy.h> 27#include <linux/phy.h>
28 28
29#define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK 0x01e0
30
29enum { 31enum {
30 MV_PCS_BASE_T = 0x0000, 32 MV_PCS_BASE_T = 0x0000,
31 MV_PCS_BASE_R = 0x1000, 33 MV_PCS_BASE_R = 0x1000,
@@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev)
386 else 388 else
387 reg = 0; 389 reg = 0;
388 390
391 /* Make sure we clear unsupported 2.5G/5G advertising */
389 ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, 392 ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
390 MDIO_AN_10GBT_CTRL_ADV10G, reg); 393 MDIO_AN_10GBT_CTRL_ADV10G |
394 MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg);
391 if (ret < 0) 395 if (ret < 0)
392 return ret; 396 return ret;
393 if (ret > 0) 397 if (ret > 0)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 66b9cfe692fc..7368616286ae 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
379 err = device_register(&bus->dev); 379 err = device_register(&bus->dev);
380 if (err) { 380 if (err) {
381 pr_err("mii_bus %s failed to register\n", bus->id); 381 pr_err("mii_bus %s failed to register\n", bus->id);
382 put_device(&bus->dev);
383 return -EINVAL; 382 return -EINVAL;
384 } 383 }
385 384
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 189cd2048c3a..c5675df5fc6f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -553,7 +553,7 @@ int phy_start_aneg(struct phy_device *phydev)
553 if (err < 0) 553 if (err < 0)
554 goto out_unlock; 554 goto out_unlock;
555 555
556 if (__phy_is_started(phydev)) { 556 if (phy_is_started(phydev)) {
557 if (phydev->autoneg == AUTONEG_ENABLE) { 557 if (phydev->autoneg == AUTONEG_ENABLE) {
558 err = phy_check_link_status(phydev); 558 err = phy_check_link_status(phydev);
559 } else { 559 } else {
@@ -709,7 +709,7 @@ void phy_stop_machine(struct phy_device *phydev)
709 cancel_delayed_work_sync(&phydev->state_queue); 709 cancel_delayed_work_sync(&phydev->state_queue);
710 710
711 mutex_lock(&phydev->lock); 711 mutex_lock(&phydev->lock);
712 if (__phy_is_started(phydev)) 712 if (phy_is_started(phydev))
713 phydev->state = PHY_UP; 713 phydev->state = PHY_UP;
714 mutex_unlock(&phydev->lock); 714 mutex_unlock(&phydev->lock);
715} 715}
@@ -762,9 +762,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
762{ 762{
763 struct phy_device *phydev = phy_dat; 763 struct phy_device *phydev = phy_dat;
764 764
765 if (!phy_is_started(phydev))
766 return IRQ_NONE; /* It can't be ours. */
767
768 if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) 765 if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
769 return IRQ_NONE; 766 return IRQ_NONE;
770 767
@@ -842,15 +839,14 @@ EXPORT_SYMBOL(phy_stop_interrupts);
842 */ 839 */
843void phy_stop(struct phy_device *phydev) 840void phy_stop(struct phy_device *phydev)
844{ 841{
845 mutex_lock(&phydev->lock); 842 if (!phy_is_started(phydev)) {
846
847 if (!__phy_is_started(phydev)) {
848 WARN(1, "called from state %s\n", 843 WARN(1, "called from state %s\n",
849 phy_state_to_str(phydev->state)); 844 phy_state_to_str(phydev->state));
850 mutex_unlock(&phydev->lock);
851 return; 845 return;
852 } 846 }
853 847
848 mutex_lock(&phydev->lock);
849
854 if (phy_interrupt_is_valid(phydev)) 850 if (phy_interrupt_is_valid(phydev))
855 phy_disable_interrupts(phydev); 851 phy_disable_interrupts(phydev);
856 852
@@ -989,8 +985,10 @@ void phy_state_machine(struct work_struct *work)
989 * state machine would be pointless and possibly error prone when 985 * state machine would be pointless and possibly error prone when
990 * called from phy_disconnect() synchronously. 986 * called from phy_disconnect() synchronously.
991 */ 987 */
988 mutex_lock(&phydev->lock);
992 if (phy_polling_mode(phydev) && phy_is_started(phydev)) 989 if (phy_polling_mode(phydev) && phy_is_started(phydev))
993 phy_queue_state_machine(phydev, PHY_STATE_TIME); 990 phy_queue_state_machine(phydev, PHY_STATE_TIME);
991 mutex_unlock(&phydev->lock);
994} 992}
995 993
996/** 994/**
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index e7becc7379d7..938803237d7f 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -474,6 +474,17 @@ static void phylink_run_resolve(struct phylink *pl)
474 queue_work(system_power_efficient_wq, &pl->resolve); 474 queue_work(system_power_efficient_wq, &pl->resolve);
475} 475}
476 476
477static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
478{
479 unsigned long state = pl->phylink_disable_state;
480
481 set_bit(bit, &pl->phylink_disable_state);
482 if (state == 0) {
483 queue_work(system_power_efficient_wq, &pl->resolve);
484 flush_work(&pl->resolve);
485 }
486}
487
477static void phylink_fixed_poll(struct timer_list *t) 488static void phylink_fixed_poll(struct timer_list *t)
478{ 489{
479 struct phylink *pl = container_of(t, struct phylink, link_poll); 490 struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -924,9 +935,7 @@ void phylink_stop(struct phylink *pl)
924 if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) 935 if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
925 del_timer_sync(&pl->link_poll); 936 del_timer_sync(&pl->link_poll);
926 937
927 set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); 938 phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
928 queue_work(system_power_efficient_wq, &pl->resolve);
929 flush_work(&pl->resolve);
930} 939}
931EXPORT_SYMBOL_GPL(phylink_stop); 940EXPORT_SYMBOL_GPL(phylink_stop);
932 941
@@ -1632,9 +1641,7 @@ static void phylink_sfp_link_down(void *upstream)
1632 1641
1633 ASSERT_RTNL(); 1642 ASSERT_RTNL();
1634 1643
1635 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); 1644 phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
1636 queue_work(system_power_efficient_wq, &pl->resolve);
1637 flush_work(&pl->resolve);
1638} 1645}
1639 1646
1640static void phylink_sfp_link_up(void *upstream) 1647static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index c6010fb1aa0f..cb4a23041a94 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = {
282 .name = "RTL8366RB Gigabit Ethernet", 282 .name = "RTL8366RB Gigabit Ethernet",
283 .features = PHY_GBIT_FEATURES, 283 .features = PHY_GBIT_FEATURES,
284 .config_init = &rtl8366rb_config_init, 284 .config_init = &rtl8366rb_config_init,
285 /* These interrupts are handled by the irq controller
286 * embedded inside the RTL8366RB, they get unmasked when the
287 * irq is requested and ACKed by reading the status register,
288 * which is done by the irqchip code.
289 */
290 .ack_interrupt = genphy_no_ack_interrupt,
291 .config_intr = genphy_no_config_intr,
285 .suspend = genphy_suspend, 292 .suspend = genphy_suspend,
286 .resume = genphy_resume, 293 .resume = genphy_resume,
287 }, 294 },
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index ad9db652874d..fef701bfad62 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
347 return ret; 347 return ret;
348 } 348 }
349 } 349 }
350 bus->socket_ops->attach(bus->sfp);
350 if (bus->started) 351 if (bus->started)
351 bus->socket_ops->start(bus->sfp); 352 bus->socket_ops->start(bus->sfp);
352 bus->netdev->sfp_bus = bus; 353 bus->netdev->sfp_bus = bus;
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
362 if (bus->registered) { 363 if (bus->registered) {
363 if (bus->started) 364 if (bus->started)
364 bus->socket_ops->stop(bus->sfp); 365 bus->socket_ops->stop(bus->sfp);
366 bus->socket_ops->detach(bus->sfp);
365 if (bus->phydev && ops && ops->disconnect_phy) 367 if (bus->phydev && ops && ops->disconnect_phy)
366 ops->disconnect_phy(bus->upstream); 368 ops->disconnect_phy(bus->upstream);
367 } 369 }
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index fd8bb998ae52..68c8fbf099f8 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -184,6 +184,7 @@ struct sfp {
184 184
185 struct gpio_desc *gpio[GPIO_MAX]; 185 struct gpio_desc *gpio[GPIO_MAX];
186 186
187 bool attached;
187 unsigned int state; 188 unsigned int state;
188 struct delayed_work poll; 189 struct delayed_work poll;
189 struct delayed_work timeout; 190 struct delayed_work timeout;
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
1475 */ 1476 */
1476 switch (sfp->sm_mod_state) { 1477 switch (sfp->sm_mod_state) {
1477 default: 1478 default:
1478 if (event == SFP_E_INSERT) { 1479 if (event == SFP_E_INSERT && sfp->attached) {
1479 sfp_module_tx_disable(sfp); 1480 sfp_module_tx_disable(sfp);
1480 sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); 1481 sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
1481 } 1482 }
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
1607 mutex_unlock(&sfp->sm_mutex); 1608 mutex_unlock(&sfp->sm_mutex);
1608} 1609}
1609 1610
1611static void sfp_attach(struct sfp *sfp)
1612{
1613 sfp->attached = true;
1614 if (sfp->state & SFP_F_PRESENT)
1615 sfp_sm_event(sfp, SFP_E_INSERT);
1616}
1617
1618static void sfp_detach(struct sfp *sfp)
1619{
1620 sfp->attached = false;
1621 sfp_sm_event(sfp, SFP_E_REMOVE);
1622}
1623
1610static void sfp_start(struct sfp *sfp) 1624static void sfp_start(struct sfp *sfp)
1611{ 1625{
1612 sfp_sm_event(sfp, SFP_E_DEV_UP); 1626 sfp_sm_event(sfp, SFP_E_DEV_UP);
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
1667} 1681}
1668 1682
1669static const struct sfp_socket_ops sfp_module_ops = { 1683static const struct sfp_socket_ops sfp_module_ops = {
1684 .attach = sfp_attach,
1685 .detach = sfp_detach,
1670 .start = sfp_start, 1686 .start = sfp_start,
1671 .stop = sfp_stop, 1687 .stop = sfp_stop,
1672 .module_info = sfp_module_info, 1688 .module_info = sfp_module_info,
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
1834 dev_info(sfp->dev, "Host maximum power %u.%uW\n", 1850 dev_info(sfp->dev, "Host maximum power %u.%uW\n",
1835 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); 1851 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
1836 1852
1837 sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
1838 if (!sfp->sfp_bus)
1839 return -ENOMEM;
1840
1841 /* Get the initial state, and always signal TX disable, 1853 /* Get the initial state, and always signal TX disable,
1842 * since the network interface will not be up. 1854 * since the network interface will not be up.
1843 */ 1855 */
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
1848 sfp->state |= SFP_F_RATE_SELECT; 1860 sfp->state |= SFP_F_RATE_SELECT;
1849 sfp_set_state(sfp, sfp->state); 1861 sfp_set_state(sfp, sfp->state);
1850 sfp_module_tx_disable(sfp); 1862 sfp_module_tx_disable(sfp);
1851 rtnl_lock();
1852 if (sfp->state & SFP_F_PRESENT)
1853 sfp_sm_event(sfp, SFP_E_INSERT);
1854 rtnl_unlock();
1855 1863
1856 for (i = 0; i < GPIO_MAX; i++) { 1864 for (i = 0; i < GPIO_MAX; i++) {
1857 if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) 1865 if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
1884 dev_warn(sfp->dev, 1892 dev_warn(sfp->dev,
1885 "No tx_disable pin: SFP modules will always be emitting.\n"); 1893 "No tx_disable pin: SFP modules will always be emitting.\n");
1886 1894
1895 sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
1896 if (!sfp->sfp_bus)
1897 return -ENOMEM;
1898
1887 return 0; 1899 return 0;
1888} 1900}
1889 1901
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 31b0acf337e2..64f54b0bbd8c 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -7,6 +7,8 @@
7struct sfp; 7struct sfp;
8 8
9struct sfp_socket_ops { 9struct sfp_socket_ops {
10 void (*attach)(struct sfp *sfp);
11 void (*detach)(struct sfp *sfp);
10 void (*start)(struct sfp *sfp); 12 void (*start)(struct sfp *sfp);
11 void (*stop)(struct sfp *sfp); 13 void (*stop)(struct sfp *sfp);
12 int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); 14 int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 74a8782313cf..bd6084e315de 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
44 u16 val = 0; 44 u16 val = 0;
45 int err; 45 int err;
46 46
47 err = priv->phy_drv->read_status(phydev); 47 if (priv->phy_drv->read_status)
48 err = priv->phy_drv->read_status(phydev);
49 else
50 err = genphy_read_status(phydev);
48 if (err < 0) 51 if (err < 0)
49 return err; 52 return err;
50 53
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index afd9d25d1992..6ce3f666d142 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
256 } 256 }
257} 257}
258 258
259static bool __team_option_inst_tmp_find(const struct list_head *opts,
260 const struct team_option_inst *needle)
261{
262 struct team_option_inst *opt_inst;
263
264 list_for_each_entry(opt_inst, opts, tmp_list)
265 if (opt_inst == needle)
266 return true;
267 return false;
268}
269
270static int __team_options_register(struct team *team, 259static int __team_options_register(struct team *team,
271 const struct team_option *option, 260 const struct team_option *option,
272 size_t option_count) 261 size_t option_count)
@@ -1267,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1267 list_add_tail_rcu(&port->list, &team->port_list); 1256 list_add_tail_rcu(&port->list, &team->port_list);
1268 team_port_enable(team, port); 1257 team_port_enable(team, port);
1269 __team_compute_features(team); 1258 __team_compute_features(team);
1270 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); 1259 __team_port_change_port_added(port, !!netif_oper_up(port_dev));
1271 __team_options_change_check(team); 1260 __team_options_change_check(team);
1272 1261
1273 netdev_info(dev, "Port device %s added\n", portname); 1262 netdev_info(dev, "Port device %s added\n", portname);
@@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2460 int err = 0; 2449 int err = 0;
2461 int i; 2450 int i;
2462 struct nlattr *nl_option; 2451 struct nlattr *nl_option;
2463 LIST_HEAD(opt_inst_list);
2464 2452
2465 rtnl_lock(); 2453 rtnl_lock();
2466 2454
@@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2480 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; 2468 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2481 struct nlattr *attr; 2469 struct nlattr *attr;
2482 struct nlattr *attr_data; 2470 struct nlattr *attr_data;
2471 LIST_HEAD(opt_inst_list);
2483 enum team_option_type opt_type; 2472 enum team_option_type opt_type;
2484 int opt_port_ifindex = 0; /* != 0 for per-port options */ 2473 int opt_port_ifindex = 0; /* != 0 for per-port options */
2485 u32 opt_array_index = 0; 2474 u32 opt_array_index = 0;
@@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2584 if (err) 2573 if (err)
2585 goto team_put; 2574 goto team_put;
2586 opt_inst->changed = true; 2575 opt_inst->changed = true;
2587
2588 /* dumb/evil user-space can send us duplicate opt,
2589 * keep only the last one
2590 */
2591 if (__team_option_inst_tmp_find(&opt_inst_list,
2592 opt_inst))
2593 continue;
2594
2595 list_add(&opt_inst->tmp_list, &opt_inst_list); 2576 list_add(&opt_inst->tmp_list, &opt_inst_list);
2596 } 2577 }
2597 if (!opt_found) { 2578 if (!opt_found) {
2598 err = -ENOENT; 2579 err = -ENOENT;
2599 goto team_put; 2580 goto team_put;
2600 } 2581 }
2601 }
2602 2582
2603 err = team_nl_send_event_options_get(team, &opt_inst_list); 2583 err = team_nl_send_event_options_get(team, &opt_inst_list);
2584 if (err)
2585 break;
2586 }
2604 2587
2605team_put: 2588team_put:
2606 team_nl_team_put(team); 2589 team_nl_team_put(team);
@@ -2932,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused,
2932 2915
2933 switch (event) { 2916 switch (event) {
2934 case NETDEV_UP: 2917 case NETDEV_UP:
2935 if (netif_carrier_ok(dev)) 2918 if (netif_oper_up(dev))
2936 team_port_change_check(port, true); 2919 team_port_change_check(port, true);
2937 break; 2920 break;
2938 case NETDEV_DOWN: 2921 case NETDEV_DOWN:
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 735ad838e2ba..18af2f8eee96 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = {
1201 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1201 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1202 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 1202 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
1203 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 1203 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
1204 {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ 1204 {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
1205 {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ 1205 {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
1206 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 1206 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
1207 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 1207 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
1208 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ 1208 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 60dd1ec1665f..86c8c64fbb0f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -557,6 +557,7 @@ enum spd_duplex {
557/* MAC PASSTHRU */ 557/* MAC PASSTHRU */
558#define AD_MASK 0xfee0 558#define AD_MASK 0xfee0
559#define BND_MASK 0x0004 559#define BND_MASK 0x0004
560#define BD_MASK 0x0001
560#define EFUSE 0xcfdb 561#define EFUSE 0xcfdb
561#define PASS_THRU_MASK 0x1 562#define PASS_THRU_MASK 0x1
562 563
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
1176 return -ENODEV; 1177 return -ENODEV;
1177 } 1178 }
1178 } else { 1179 } else {
1179 /* test for RTL8153-BND */ 1180 /* test for RTL8153-BND and RTL8153-BD */
1180 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); 1181 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
1181 if ((ocp_data & BND_MASK) == 0) { 1182 if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
1182 netif_dbg(tp, probe, tp->netdev, 1183 netif_dbg(tp, probe, tp->netdev,
1183 "Invalid variant for MAC pass through\n"); 1184 "Invalid variant for MAC pass through\n");
1184 return -ENODEV; 1185 return -ENODEV;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 95909e262ba4..7c1430ed0244 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev)
1273 1273
1274 /* default to no qdisc; user can add if desired */ 1274 /* default to no qdisc; user can add if desired */
1275 dev->priv_flags |= IFF_NO_QUEUE; 1275 dev->priv_flags |= IFF_NO_QUEUE;
1276
1277 dev->min_mtu = 0;
1278 dev->max_mtu = 0;
1276} 1279}
1277 1280
1278static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], 1281static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5209ee9aac47..2aae11feff0c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2219,7 +2219,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2219 struct pcpu_sw_netstats *tx_stats, *rx_stats; 2219 struct pcpu_sw_netstats *tx_stats, *rx_stats;
2220 union vxlan_addr loopback; 2220 union vxlan_addr loopback;
2221 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 2221 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
2222 struct net_device *dev = skb->dev; 2222 struct net_device *dev;
2223 int len = skb->len; 2223 int len = skb->len;
2224 2224
2225 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 2225 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
@@ -2239,9 +2239,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2239#endif 2239#endif
2240 } 2240 }
2241 2241
2242 rcu_read_lock();
2243 dev = skb->dev;
2244 if (unlikely(!(dev->flags & IFF_UP))) {
2245 kfree_skb(skb);
2246 goto drop;
2247 }
2248
2242 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) 2249 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
2243 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, 2250 vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
2244 vni);
2245 2251
2246 u64_stats_update_begin(&tx_stats->syncp); 2252 u64_stats_update_begin(&tx_stats->syncp);
2247 tx_stats->tx_packets++; 2253 tx_stats->tx_packets++;
@@ -2254,8 +2260,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2254 rx_stats->rx_bytes += len; 2260 rx_stats->rx_bytes += len;
2255 u64_stats_update_end(&rx_stats->syncp); 2261 u64_stats_update_end(&rx_stats->syncp);
2256 } else { 2262 } else {
2263drop:
2257 dev->stats.rx_dropped++; 2264 dev->stats.rx_dropped++;
2258 } 2265 }
2266 rcu_read_unlock();
2259} 2267}
2260 2268
2261static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, 2269static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 320edcac4699..6359053bd0c7 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3554,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
3554 goto out_err; 3554 goto out_err;
3555 } 3555 }
3556 3556
3557 genlmsg_reply(skb, info); 3557 res = genlmsg_reply(skb, info);
3558 break; 3558 break;
3559 } 3559 }
3560 3560
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0e6b43bb4678..a5ea3ba495a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = {
158 .get_txpower = mt76x02_get_txpower, 158 .get_txpower = mt76x02_get_txpower,
159}; 159};
160 160
161static int mt76x0u_register_device(struct mt76x02_dev *dev) 161static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
162{ 162{
163 struct ieee80211_hw *hw = dev->mt76.hw;
164 int err; 163 int err;
165 164
166 err = mt76u_alloc_queues(&dev->mt76);
167 if (err < 0)
168 goto out_err;
169
170 err = mt76u_mcu_init_rx(&dev->mt76);
171 if (err < 0)
172 goto out_err;
173
174 mt76x0_chip_onoff(dev, true, true); 165 mt76x0_chip_onoff(dev, true, true);
175 if (!mt76x02_wait_for_mac(&dev->mt76)) { 166
176 err = -ETIMEDOUT; 167 if (!mt76x02_wait_for_mac(&dev->mt76))
177 goto out_err; 168 return -ETIMEDOUT;
178 }
179 169
180 err = mt76x0u_mcu_init(dev); 170 err = mt76x0u_mcu_init(dev);
181 if (err < 0) 171 if (err < 0)
182 goto out_err; 172 return err;
183 173
184 mt76x0_init_usb_dma(dev); 174 mt76x0_init_usb_dma(dev);
185 err = mt76x0_init_hardware(dev); 175 err = mt76x0_init_hardware(dev);
186 if (err < 0) 176 if (err < 0)
187 goto out_err; 177 return err;
188 178
189 mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); 179 mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
190 mt76_wr(dev, MT_TXOP_CTRL_CFG, 180 mt76_wr(dev, MT_TXOP_CTRL_CFG,
191 FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | 181 FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
192 FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); 182 FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
193 183
184 return 0;
185}
186
187static int mt76x0u_register_device(struct mt76x02_dev *dev)
188{
189 struct ieee80211_hw *hw = dev->mt76.hw;
190 int err;
191
192 err = mt76u_alloc_queues(&dev->mt76);
193 if (err < 0)
194 goto out_err;
195
196 err = mt76u_mcu_init_rx(&dev->mt76);
197 if (err < 0)
198 goto out_err;
199
200 err = mt76x0u_init_hardware(dev);
201 if (err < 0)
202 goto out_err;
203
194 err = mt76x0_register_device(dev); 204 err = mt76x0_register_device(dev);
195 if (err < 0) 205 if (err < 0)
196 goto out_err; 206 goto out_err;
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
301 311
302 mt76u_stop_queues(&dev->mt76); 312 mt76u_stop_queues(&dev->mt76);
303 mt76x0u_mac_stop(dev); 313 mt76x0u_mac_stop(dev);
314 clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
315 mt76x0_chip_onoff(dev, false, false);
304 usb_kill_urb(usb->mcu.res.urb); 316 usb_kill_urb(usb->mcu.res.urb);
305 317
306 return 0; 318 return 0;
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
328 tasklet_enable(&usb->rx_tasklet); 340 tasklet_enable(&usb->rx_tasklet);
329 tasklet_enable(&usb->tx_tasklet); 341 tasklet_enable(&usb->tx_tasklet);
330 342
331 ret = mt76x0_init_hardware(dev); 343 ret = mt76x0u_init_hardware(dev);
332 if (ret) 344 if (ret)
333 goto err; 345 goto err;
334 346
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 022ea1ee63f8..7fee665ec45e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2560,15 +2560,15 @@ static void nvme_reset_work(struct work_struct *work)
2560 mutex_lock(&dev->shutdown_lock); 2560 mutex_lock(&dev->shutdown_lock);
2561 result = nvme_pci_enable(dev); 2561 result = nvme_pci_enable(dev);
2562 if (result) 2562 if (result)
2563 goto out; 2563 goto out_unlock;
2564 2564
2565 result = nvme_pci_configure_admin_queue(dev); 2565 result = nvme_pci_configure_admin_queue(dev);
2566 if (result) 2566 if (result)
2567 goto out; 2567 goto out_unlock;
2568 2568
2569 result = nvme_alloc_admin_tags(dev); 2569 result = nvme_alloc_admin_tags(dev);
2570 if (result) 2570 if (result)
2571 goto out; 2571 goto out_unlock;
2572 2572
2573 /* 2573 /*
2574 * Limit the max command size to prevent iod->sg allocations going 2574 * Limit the max command size to prevent iod->sg allocations going
@@ -2651,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work)
2651 nvme_start_ctrl(&dev->ctrl); 2651 nvme_start_ctrl(&dev->ctrl);
2652 return; 2652 return;
2653 2653
2654 out_unlock:
2655 mutex_unlock(&dev->shutdown_lock);
2654 out: 2656 out:
2655 nvme_remove_dead_ctrl(dev, result); 2657 nvme_remove_dead_ctrl(dev, result);
2656} 2658}
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index c69ca95b1ad5..0f140a802137 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = {
693 693
694static const char * const sdxc_a_groups[] = { 694static const char * const sdxc_a_groups[] = {
695 "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", 695 "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
696 "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a" 696 "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
697}; 697};
698 698
699static const char * const pcm_a_groups[] = { 699static const char * const pcm_a_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index 7aae52a09ff0..4ffd56ff809e 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -79,7 +79,7 @@ enum {
79 .intr_cfg_reg = 0, \ 79 .intr_cfg_reg = 0, \
80 .intr_status_reg = 0, \ 80 .intr_status_reg = 0, \
81 .intr_target_reg = 0, \ 81 .intr_target_reg = 0, \
82 .tile = NORTH, \ 82 .tile = SOUTH, \
83 .mux_bit = -1, \ 83 .mux_bit = -1, \
84 .pull_bit = pull, \ 84 .pull_bit = pull, \
85 .drv_bit = drv, \ 85 .drv_bit = drv, \
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 4e7b55a14b1a..6e294b4d3635 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
4469 usrparm.psf_data &= 0x7fffffffULL; 4469 usrparm.psf_data &= 0x7fffffffULL;
4470 usrparm.rssd_result &= 0x7fffffffULL; 4470 usrparm.rssd_result &= 0x7fffffffULL;
4471 } 4471 }
4472 /* at least 2 bytes are accessed and should be allocated */
4473 if (usrparm.psf_data_len < 2) {
4474 DBF_DEV_EVENT(DBF_WARNING, device,
4475 "Symmetrix ioctl invalid data length %d",
4476 usrparm.psf_data_len);
4477 rc = -EINVAL;
4478 goto out;
4479 }
4472 /* alloc I/O data area */ 4480 /* alloc I/O data area */
4473 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 4481 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
4474 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 4482 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 48ea0004a56d..5a699746c357 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
248static inline int ap_test_config_card_id(unsigned int id) 248static inline int ap_test_config_card_id(unsigned int id)
249{ 249{
250 if (!ap_configuration) /* QCI not supported */ 250 if (!ap_configuration) /* QCI not supported */
251 return 1; 251 /* only ids 0...3F may be probed */
252 return id < 0x40 ? 1 : 0;
252 return ap_test_config(ap_configuration->apm, id); 253 return ap_test_config(ap_configuration->apm, id);
253} 254}
254 255
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b8d325ce8754..120fc520f27a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1459,7 +1459,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
1459 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) 1459 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
1460 return -ENODATA; 1460 return -ENODATA;
1461 1461
1462 spin_lock_bh(&conn->session->back_lock);
1463 if (conn->task == NULL) {
1464 spin_unlock_bh(&conn->session->back_lock);
1465 return -ENODATA;
1466 }
1462 __iscsi_get_task(task); 1467 __iscsi_get_task(task);
1468 spin_unlock_bh(&conn->session->back_lock);
1463 spin_unlock_bh(&conn->session->frwd_lock); 1469 spin_unlock_bh(&conn->session->frwd_lock);
1464 rc = conn->session->tt->xmit_task(task); 1470 rc = conn->session->tt->xmit_task(task);
1465 spin_lock_bh(&conn->session->frwd_lock); 1471 spin_lock_bh(&conn->session->frwd_lock);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 17eb4185f29d..f21c93bbb35c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -828,6 +828,7 @@ static struct domain_device *sas_ex_discover_end_dev(
828 rphy = sas_end_device_alloc(phy->port); 828 rphy = sas_end_device_alloc(phy->port);
829 if (!rphy) 829 if (!rphy)
830 goto out_free; 830 goto out_free;
831 rphy->identify.phy_identifier = phy_id;
831 832
832 child->rphy = rphy; 833 child->rphy = rphy;
833 get_device(&rphy->dev); 834 get_device(&rphy->dev);
@@ -854,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev(
854 855
855 child->rphy = rphy; 856 child->rphy = rphy;
856 get_device(&rphy->dev); 857 get_device(&rphy->dev);
858 rphy->identify.phy_identifier = phy_id;
857 sas_fill_in_rphy(child, rphy); 859 sas_fill_in_rphy(child, rphy);
858 860
859 list_add_tail(&child->disco_list_node, &parent->port->disco_list); 861 list_add_tail(&child->disco_list_node, &parent->port->disco_list);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aeeb0144bd55..8d1acc802a67 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
1785 1785
1786 /* Issue Marker IOCB */ 1786 /* Issue Marker IOCB */
1787 qla2x00_marker(vha, vha->hw->req_q_map[0], 1787 qla2x00_marker(vha, vha->hw->req_q_map[0],
1788 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, 1788 vha->hw->rsp_q_map[0], fcport->loop_id, lun,
1789 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 1789 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
1790 } 1790 }
1791 1791
1792done_free_sp: 1792done_free_sp:
1793 sp->free(sp); 1793 sp->free(sp);
1794 sp->fcport->flags &= ~FCF_ASYNC_SENT; 1794 fcport->flags &= ~FCF_ASYNC_SENT;
1795done: 1795done:
1796 return rval; 1796 return rval;
1797} 1797}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6d65ac584eba..f8d51c3d5582 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -655,6 +655,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
655 set_host_byte(cmd, DID_OK); 655 set_host_byte(cmd, DID_OK);
656 return BLK_STS_TARGET; 656 return BLK_STS_TARGET;
657 case DID_NEXUS_FAILURE: 657 case DID_NEXUS_FAILURE:
658 set_host_byte(cmd, DID_OK);
658 return BLK_STS_NEXUS; 659 return BLK_STS_NEXUS;
659 case DID_ALLOC_FAILURE: 660 case DID_ALLOC_FAILURE:
660 set_host_byte(cmd, DID_OK); 661 set_host_byte(cmd, DID_OK);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b2da8a00ec33..5464d467e23e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2951,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2951 if (rot == 1) { 2951 if (rot == 1) {
2952 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2952 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2953 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2953 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2954 } else {
2955 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2956 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
2957 } 2954 }
2958 2955
2959 if (sdkp->device->type == TYPE_ZBC) { 2956 if (sdkp->device->type == TYPE_ZBC) {
@@ -3090,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
3090 if (sdkp->media_present) { 3087 if (sdkp->media_present) {
3091 sd_read_capacity(sdkp, buffer); 3088 sd_read_capacity(sdkp, buffer);
3092 3089
3090 /*
3091 * set the default to rotational. All non-rotational devices
3092 * support the block characteristics VPD page, which will
3093 * cause this to be updated correctly and any device which
3094 * doesn't support it should be treated as rotational.
3095 */
3096 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
3097 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
3098
3093 if (scsi_device_supports_vpd(sdp)) { 3099 if (scsi_device_supports_vpd(sdp)) {
3094 sd_read_block_provisioning(sdkp); 3100 sd_read_block_provisioning(sdkp);
3095 sd_read_block_limits(sdkp); 3101 sd_read_block_limits(sdkp);
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index fff86940388b..a340af797a85 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
142 return -EOPNOTSUPP; 142 return -EOPNOTSUPP;
143 143
144 /* 144 /*
145 * Get a reply buffer for the number of requested zones plus a header. 145 * Get a reply buffer for the number of requested zones plus a header,
146 * For ATA, buffers must be aligned to 512B. 146 * without exceeding the device maximum command size. For ATA disks,
147 * buffers must be aligned to 512B.
147 */ 148 */
148 buflen = roundup((nrz + 1) * 64, 512); 149 buflen = min(queue_max_hw_sectors(disk->queue) << 9,
150 roundup((nrz + 1) * 64, 512));
149 buf = kmalloc(buflen, gfp_mask); 151 buf = kmalloc(buflen, gfp_mask);
150 if (!buf) 152 if (!buf)
151 return -ENOMEM; 153 return -ENOMEM;
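The rewritten comment above caps the report-zones reply buffer at the device's maximum transfer size. A quick worked example of the new clamp with invented values, following the same roundup-then-min arithmetic as the hunk:

#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int nrz = 8192;		/* zones requested (invented) */
	unsigned int max_hw_sectors = 256;	/* 512-byte sectors, i.e. 128 KiB */
	unsigned int want = ROUNDUP((nrz + 1) * 64, 512);
	unsigned int buflen = MIN(max_hw_sectors << 9, want);

	/* prints: wanted 524800 bytes, clamped to 131072 */
	printf("wanted %u bytes, clamped to %u\n", want, buflen);
	return 0;
}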
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index dfd23245f778..6fff16113628 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -774,7 +774,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
774 774
775 cdev = __cpufreq_cooling_register(np, policy, capacitance); 775 cdev = __cpufreq_cooling_register(np, policy, capacitance);
776 if (IS_ERR(cdev)) { 776 if (IS_ERR(cdev)) {
777 pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", 777 pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
778 policy->cpu, PTR_ERR(cdev)); 778 policy->cpu, PTR_ERR(cdev));
779 cdev = NULL; 779 cdev = NULL;
780 } 780 }
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4bfdb4a1e47d..2df059cc07e2 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -867,14 +867,14 @@ __init *thermal_of_build_thermal_zone(struct device_node *np)
867 867
868 ret = of_property_read_u32(np, "polling-delay-passive", &prop); 868 ret = of_property_read_u32(np, "polling-delay-passive", &prop);
869 if (ret < 0) { 869 if (ret < 0) {
870 pr_err("missing polling-delay-passive property\n"); 870 pr_err("%pOFn: missing polling-delay-passive property\n", np);
871 goto free_tz; 871 goto free_tz;
872 } 872 }
873 tz->passive_delay = prop; 873 tz->passive_delay = prop;
874 874
875 ret = of_property_read_u32(np, "polling-delay", &prop); 875 ret = of_property_read_u32(np, "polling-delay", &prop);
876 if (ret < 0) { 876 if (ret < 0) {
877 pr_err("missing polling-delay property\n"); 877 pr_err("%pOFn: missing polling-delay property\n", np);
878 goto free_tz; 878 goto free_tz;
879 } 879 }
880 tz->polling_delay = prop; 880 tz->polling_delay = prop;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 24a129fcdd61..a2e5dc7716e2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1788 1788
1789 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, 1789 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1790 len, iov, 64, VHOST_ACCESS_WO); 1790 len, iov, 64, VHOST_ACCESS_WO);
1791 if (ret) 1791 if (ret < 0)
1792 return ret; 1792 return ret;
1793 1793
1794 for (i = 0; i < ret; i++) { 1794 for (i = 0; i < ret; i++) {
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index d0078cbb718b..e996174cbfc0 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -14,13 +14,30 @@
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16 16
17static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
18static inline char *next_non_spacetab(char *first, const char *last)
19{
20 for (; first <= last; first++)
21 if (!spacetab(*first))
22 return first;
23 return NULL;
24}
25static inline char *next_terminator(char *first, const char *last)
26{
27 for (; first <= last; first++)
28 if (spacetab(*first) || !*first)
29 return first;
30 return NULL;
31}
32
17static int load_script(struct linux_binprm *bprm) 33static int load_script(struct linux_binprm *bprm)
18{ 34{
19 const char *i_arg, *i_name; 35 const char *i_arg, *i_name;
20 char *cp; 36 char *cp, *buf_end;
21 struct file *file; 37 struct file *file;
22 int retval; 38 int retval;
23 39
40 /* Not ours to exec if we don't start with "#!". */
24 if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) 41 if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
25 return -ENOEXEC; 42 return -ENOEXEC;
26 43
@@ -33,23 +50,41 @@ static int load_script(struct linux_binprm *bprm)
33 if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) 50 if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
34 return -ENOENT; 51 return -ENOENT;
35 52
36 /* 53 /* Release since we are not mapping a binary into memory. */
37 * This section does the #! interpretation.
38 * Sorta complicated, but hopefully it will work. -TYT
39 */
40
41 allow_write_access(bprm->file); 54 allow_write_access(bprm->file);
42 fput(bprm->file); 55 fput(bprm->file);
43 bprm->file = NULL; 56 bprm->file = NULL;
44 57
45 for (cp = bprm->buf+2;; cp++) { 58 /*
46 if (cp >= bprm->buf + BINPRM_BUF_SIZE) 59 * This section handles parsing the #! line into separate
60 * interpreter path and argument strings. We must be careful
61 * because bprm->buf is not yet guaranteed to be NUL-terminated
62 * (though the buffer will have trailing NUL padding when the
63 * file size was smaller than the buffer size).
64 *
65 * We do not want to exec a truncated interpreter path, so either
66 * we find a newline (which indicates nothing is truncated), or
67 * we find a space/tab/NUL after the interpreter path (which
68 * itself may be preceded by spaces/tabs). Truncating the
69 * arguments is fine: the interpreter can re-read the script to
70 * parse them on its own.
71 */
72 buf_end = bprm->buf + sizeof(bprm->buf) - 1;
73 cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
74 if (!cp) {
75 cp = next_non_spacetab(bprm->buf + 2, buf_end);
76 if (!cp)
77 return -ENOEXEC; /* Entire buf is spaces/tabs */
78 /*
79 * If there is no later space/tab/NUL we must assume the
80 * interpreter path is truncated.
81 */
82 if (!next_terminator(cp, buf_end))
47 return -ENOEXEC; 83 return -ENOEXEC;
48 if (!*cp || (*cp == '\n')) 84 cp = buf_end;
49 break;
50 } 85 }
86 /* NUL-terminate the buffer and any trailing spaces/tabs. */
51 *cp = '\0'; 87 *cp = '\0';
52
53 while (cp > bprm->buf) { 88 while (cp > bprm->buf) {
54 cp--; 89 cp--;
55 if ((*cp == ' ') || (*cp == '\t')) 90 if ((*cp == ' ') || (*cp == '\t'))
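The helpers and comments added above encode one rule: a "#!" line that does not fit in the first buffer is only acceptable when the interpreter path is known to end inside the buffer (a space, tab or NUL follows it); otherwise the path may be truncated and the exec is refused. A standalone user-space sketch of that rule, with BUF_SIZE standing in for BINPRM_BUF_SIZE and the test strings invented:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 16	/* stand-in for BINPRM_BUF_SIZE */

static bool spacetab(char c) { return c == ' ' || c == '\t'; }

static bool shebang_ok(const char *buf)
{
	const char *end = buf + BUF_SIZE - 1;
	const char *cp;

	if (buf[0] != '#' || buf[1] != '!')
		return false;
	if (memchr(buf, '\n', BUF_SIZE))
		return true;			/* whole line fits: nothing truncated */
	for (cp = buf + 2; cp <= end && spacetab(*cp); cp++)
		;				/* skip leading spaces/tabs */
	if (cp > end)
		return false;			/* nothing but spaces/tabs after "#!" */
	for (; cp <= end; cp++)
		if (spacetab(*cp) || !*cp)
			return true;		/* path terminates inside the buffer */
	return false;				/* path runs off the end: truncated */
}

int main(void)
{
	static const char fits[BUF_SIZE] = "#!/bin/sh\n";	/* zero padded */
	static const char truncated[] = "#!/very/long/path";	/* no '\n' in buffer */

	printf("%d %d\n", shebang_ok(fits), shebang_ok(truncated));	/* 1 0 */
	return 0;
}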
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 041c27ea8de1..f74193da0e09 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
616 capsnap->size); 616 capsnap->size);
617 617
618 spin_lock(&mdsc->snap_flush_lock); 618 spin_lock(&mdsc->snap_flush_lock);
619 list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); 619 if (list_empty(&ci->i_snap_flush_item))
620 list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
620 spin_unlock(&mdsc->snap_flush_lock); 621 spin_unlock(&mdsc->snap_flush_lock);
621 return 1; /* caller may want to ceph_flush_snaps */ 622 return 1; /* caller may want to ceph_flush_snaps */
622} 623}
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 712f00995390..5508baa11bb6 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
116 goto out; 116 goto out;
117 } 117 }
118 118
119 ret = file_write_and_wait_range(file, start, end);
120 if (ret)
121 return ret;
122
123 if (!journal) { 119 if (!journal) {
124 struct writeback_control wbc = { 120 ret = __generic_file_fsync(file, start, end, datasync);
125 .sync_mode = WB_SYNC_ALL
126 };
127
128 ret = ext4_write_inode(inode, &wbc);
129 if (!ret) 121 if (!ret)
130 ret = ext4_sync_parent(inode); 122 ret = ext4_sync_parent(inode);
131 if (test_opt(inode->i_sb, BARRIER)) 123 if (test_opt(inode->i_sb, BARRIER))
@@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
133 goto out; 125 goto out;
134 } 126 }
135 127
128 ret = file_write_and_wait_range(file, start, end);
129 if (ret)
130 return ret;
136 /* 131 /*
137 * data=writeback,ordered: 132 * data=writeback,ordered:
138 * The caller's filemap_fdatawrite()/wait will sync the data. 133 * The caller's filemap_fdatawrite()/wait will sync the data.
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index f15b4c57c4bd..78510ab91835 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -28,7 +28,6 @@
28#include "util.h" 28#include "util.h"
29#include "trans.h" 29#include "trans.h"
30#include "dir.h" 30#include "dir.h"
31#include "lops.h"
32 31
33struct workqueue_struct *gfs2_freeze_wq; 32struct workqueue_struct *gfs2_freeze_wq;
34 33
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 5bfaf381921a..b8830fda51e8 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -733,7 +733,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
733 lh->lh_crc = cpu_to_be32(crc); 733 lh->lh_crc = cpu_to_be32(crc);
734 734
735 gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr); 735 gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
736 gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags); 736 gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, op_flags);
737 log_flush_wait(sdp); 737 log_flush_wait(sdp);
738} 738}
739 739
@@ -810,7 +810,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
810 810
811 gfs2_ordered_write(sdp); 811 gfs2_ordered_write(sdp);
812 lops_before_commit(sdp, tr); 812 lops_before_commit(sdp, tr);
813 gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE); 813 gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, 0);
814 814
815 if (sdp->sd_log_head != sdp->sd_log_flush_head) { 815 if (sdp->sd_log_head != sdp->sd_log_flush_head) {
816 log_flush_wait(sdp); 816 log_flush_wait(sdp);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 94dcab655bc0..2295042bc625 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -17,9 +17,7 @@
17#include <linux/bio.h> 17#include <linux/bio.h>
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/list_sort.h> 19#include <linux/list_sort.h>
20#include <linux/blkdev.h>
21 20
22#include "bmap.h"
23#include "dir.h" 21#include "dir.h"
24#include "gfs2.h" 22#include "gfs2.h"
25#include "incore.h" 23#include "incore.h"
@@ -195,6 +193,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
195/** 193/**
196 * gfs2_end_log_write - end of i/o to the log 194 * gfs2_end_log_write - end of i/o to the log
197 * @bio: The bio 195 * @bio: The bio
196 * @error: Status of i/o request
198 * 197 *
199 * Each bio_vec contains either data from the pagecache or data 198 * Each bio_vec contains either data from the pagecache or data
200 * relating to the log itself. Here we iterate over the bio_vec 199 * relating to the log itself. Here we iterate over the bio_vec
@@ -231,19 +230,20 @@ static void gfs2_end_log_write(struct bio *bio)
231/** 230/**
232 * gfs2_log_submit_bio - Submit any pending log bio 231 * gfs2_log_submit_bio - Submit any pending log bio
233 * @biop: Address of the bio pointer 232 * @biop: Address of the bio pointer
234 * @opf: REQ_OP | op_flags 233 * @op: REQ_OP
234 * @op_flags: req_flag_bits
235 * 235 *
236 * Submit any pending part-built or full bio to the block device. If 236 * Submit any pending part-built or full bio to the block device. If
237 * there is no pending bio, then this is a no-op. 237 * there is no pending bio, then this is a no-op.
238 */ 238 */
239 239
240void gfs2_log_submit_bio(struct bio **biop, int opf) 240void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags)
241{ 241{
242 struct bio *bio = *biop; 242 struct bio *bio = *biop;
243 if (bio) { 243 if (bio) {
244 struct gfs2_sbd *sdp = bio->bi_private; 244 struct gfs2_sbd *sdp = bio->bi_private;
245 atomic_inc(&sdp->sd_log_in_flight); 245 atomic_inc(&sdp->sd_log_in_flight);
246 bio->bi_opf = opf; 246 bio_set_op_attrs(bio, op, op_flags);
247 submit_bio(bio); 247 submit_bio(bio);
248 *biop = NULL; 248 *biop = NULL;
249 } 249 }
@@ -304,7 +304,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
304 nblk >>= sdp->sd_fsb2bb_shift; 304 nblk >>= sdp->sd_fsb2bb_shift;
305 if (blkno == nblk && !flush) 305 if (blkno == nblk && !flush)
306 return bio; 306 return bio;
307 gfs2_log_submit_bio(biop, op); 307 gfs2_log_submit_bio(biop, op, 0);
308 } 308 }
309 309
310 *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); 310 *biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
@@ -375,184 +375,6 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
375 gfs2_log_bmap(sdp)); 375 gfs2_log_bmap(sdp));
376} 376}
377 377
378/**
379 * gfs2_end_log_read - end I/O callback for reads from the log
380 * @bio: The bio
381 *
382 * Simply unlock the pages in the bio. The main thread will wait on them and
383 * process them in order as necessary.
384 */
385
386static void gfs2_end_log_read(struct bio *bio)
387{
388 struct page *page;
389 struct bio_vec *bvec;
390 int i;
391
392 bio_for_each_segment_all(bvec, bio, i) {
393 page = bvec->bv_page;
394 if (bio->bi_status) {
395 int err = blk_status_to_errno(bio->bi_status);
396
397 SetPageError(page);
398 mapping_set_error(page->mapping, err);
399 }
400 unlock_page(page);
401 }
402
403 bio_put(bio);
404}
405
406/**
407 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
408 * @jd: The journal descriptor
409 * @page: The page to look in
410 *
411 * Returns: 1 if found, 0 otherwise.
412 */
413
414static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
415 struct gfs2_log_header_host *head,
416 struct page *page)
417{
418 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
419 struct gfs2_log_header_host uninitialized_var(lh);
420 void *kaddr = kmap_atomic(page);
421 unsigned int offset;
422 bool ret = false;
423
424 for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
425 if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
426 if (lh.lh_sequence > head->lh_sequence)
427 *head = lh;
428 else {
429 ret = true;
430 break;
431 }
432 }
433 }
434 kunmap_atomic(kaddr);
435 return ret;
436}
437
438/**
439 * gfs2_jhead_process_page - Search/cleanup a page
440 * @jd: The journal descriptor
441 * @index: Index of the page to look into
442 * @done: If set, perform only cleanup, else search and set if found.
443 *
444 * Find the page with 'index' in the journal's mapping. Search the page for
445 * the journal head if requested (cleanup == false). Release refs on the
446 * page so the page cache can reclaim it (put_page() twice). We grabbed a
447 * reference on this page two times, first when we did a find_or_create_page()
448 * to obtain the page to add it to the bio and second when we do a
449 * find_get_page() here to get the page to wait on while I/O on it is being
450 * completed.
451 * This function is also used to free up a page we might've grabbed but not
452 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
453 * submitted the I/O, but we already found the jhead so we only need to drop
454 * our references to the page.
455 */
456
457static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
458 struct gfs2_log_header_host *head,
459 bool *done)
460{
461 struct page *page;
462
463 page = find_get_page(jd->jd_inode->i_mapping, index);
464 wait_on_page_locked(page);
465
466 if (PageError(page))
467 *done = true;
468
469 if (!*done)
470 *done = gfs2_jhead_pg_srch(jd, head, page);
471
472 put_page(page); /* Once for find_get_page */
473 put_page(page); /* Once more for find_or_create_page */
474}
475
476/**
477 * gfs2_find_jhead - find the head of a log
478 * @jd: The journal descriptor
479 * @head: The log descriptor for the head of the log is returned here
480 *
481 * Do a search of a journal by reading it in large chunks using bios and find
482 * the valid log entry with the highest sequence number. (i.e. the log head)
483 *
484 * Returns: 0 on success, errno otherwise
485 */
486
487int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
488{
489 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
490 struct address_space *mapping = jd->jd_inode->i_mapping;
491 struct gfs2_journal_extent *je;
492 u32 block, read_idx = 0, submit_idx = 0, index = 0;
493 int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
494 int blocks_per_page = 1 << shift, sz, ret = 0;
495 struct bio *bio = NULL;
496 struct page *page;
497 bool done = false;
498 errseq_t since;
499
500 memset(head, 0, sizeof(*head));
501 if (list_empty(&jd->extent_list))
502 gfs2_map_journal_extents(sdp, jd);
503
504 since = filemap_sample_wb_err(mapping);
505 list_for_each_entry(je, &jd->extent_list, list) {
506 for (block = 0; block < je->blocks; block += blocks_per_page) {
507 index = (je->lblock + block) >> shift;
508
509 page = find_or_create_page(mapping, index, GFP_NOFS);
510 if (!page) {
511 ret = -ENOMEM;
512 done = true;
513 goto out;
514 }
515
516 if (bio) {
517 sz = bio_add_page(bio, page, PAGE_SIZE, 0);
518 if (sz == PAGE_SIZE)
519 goto page_added;
520 submit_idx = index;
521 submit_bio(bio);
522 bio = NULL;
523 }
524
525 bio = gfs2_log_alloc_bio(sdp,
526 je->dblock + (index << shift),
527 gfs2_end_log_read);
528 bio->bi_opf = REQ_OP_READ;
529 sz = bio_add_page(bio, page, PAGE_SIZE, 0);
530 gfs2_assert_warn(sdp, sz == PAGE_SIZE);
531
532page_added:
533 if (submit_idx <= read_idx + BIO_MAX_PAGES) {
534 /* Keep at least one bio in flight */
535 continue;
536 }
537
538 gfs2_jhead_process_page(jd, read_idx++, head, &done);
539 if (done)
540 goto out; /* found */
541 }
542 }
543
544out:
545 if (bio)
546 submit_bio(bio);
547 while (read_idx <= index)
548 gfs2_jhead_process_page(jd, read_idx++, head, &done);
549
550 if (!ret)
551 ret = filemap_check_wb_err(mapping, since);
552
553 return ret;
554}
555
556static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, 378static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
557 u32 ld_length, u32 ld_data1) 379 u32 ld_length, u32 ld_data1)
558{ 380{
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 331160fc568b..711c4d89c063 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -30,10 +30,8 @@ extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp);
30extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, 30extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
31 unsigned size, unsigned offset, u64 blkno); 31 unsigned size, unsigned offset, u64 blkno);
32extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); 32extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
33extern void gfs2_log_submit_bio(struct bio **biop, int opf); 33extern void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags);
34extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); 34extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
35extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
36 struct gfs2_log_header_host *head);
37 35
38static inline unsigned int buf_limit(struct gfs2_sbd *sdp) 36static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
39{ 37{
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1179763f6370..b041cb8ae383 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -41,7 +41,6 @@
41#include "dir.h" 41#include "dir.h"
42#include "meta_io.h" 42#include "meta_io.h"
43#include "trace_gfs2.h" 43#include "trace_gfs2.h"
44#include "lops.h"
45 44
46#define DO 0 45#define DO 0
47#define UNDO 1 46#define UNDO 1
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 7389e445a7a7..2dac43065382 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -182,6 +182,129 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
182} 182}
183 183
184/** 184/**
185 * find_good_lh - find a good log header
186 * @jd: the journal
187 * @blk: the segment to start searching from
188 * @lh: the log header to fill in
189 * @forward: if true search forward in the log, else search backward
190 *
191 * Call get_log_header() to get a log header for a segment, but if the
192 * segment is bad, either scan forward or backward until we find a good one.
193 *
194 * Returns: errno
195 */
196
197static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
198 struct gfs2_log_header_host *head)
199{
200 unsigned int orig_blk = *blk;
201 int error;
202
203 for (;;) {
204 error = get_log_header(jd, *blk, head);
205 if (error <= 0)
206 return error;
207
208 if (++*blk == jd->jd_blocks)
209 *blk = 0;
210
211 if (*blk == orig_blk) {
212 gfs2_consist_inode(GFS2_I(jd->jd_inode));
213 return -EIO;
214 }
215 }
216}
217
218/**
219 * jhead_scan - make sure we've found the head of the log
220 * @jd: the journal
221 * @head: this is filled in with the log descriptor of the head
222 *
223 * At this point, seg and lh should be either the head of the log or just
224 * before. Scan forward until we find the head.
225 *
226 * Returns: errno
227 */
228
229static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
230{
231 unsigned int blk = head->lh_blkno;
232 struct gfs2_log_header_host lh;
233 int error;
234
235 for (;;) {
236 if (++blk == jd->jd_blocks)
237 blk = 0;
238
239 error = get_log_header(jd, blk, &lh);
240 if (error < 0)
241 return error;
242 if (error == 1)
243 continue;
244
245 if (lh.lh_sequence == head->lh_sequence) {
246 gfs2_consist_inode(GFS2_I(jd->jd_inode));
247 return -EIO;
248 }
249 if (lh.lh_sequence < head->lh_sequence)
250 break;
251
252 *head = lh;
253 }
254
255 return 0;
256}
257
258/**
259 * gfs2_find_jhead - find the head of a log
260 * @jd: the journal
261 * @head: the log descriptor for the head of the log is returned here
262 *
263 * Do a binary search of a journal and find the valid log entry with the
264 * highest sequence number. (i.e. the log head)
265 *
266 * Returns: errno
267 */
268
269int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
270{
271 struct gfs2_log_header_host lh_1, lh_m;
272 u32 blk_1, blk_2, blk_m;
273 int error;
274
275 blk_1 = 0;
276 blk_2 = jd->jd_blocks - 1;
277
278 for (;;) {
279 blk_m = (blk_1 + blk_2) / 2;
280
281 error = find_good_lh(jd, &blk_1, &lh_1);
282 if (error)
283 return error;
284
285 error = find_good_lh(jd, &blk_m, &lh_m);
286 if (error)
287 return error;
288
289 if (blk_1 == blk_m || blk_m == blk_2)
290 break;
291
292 if (lh_1.lh_sequence <= lh_m.lh_sequence)
293 blk_1 = blk_m;
294 else
295 blk_2 = blk_m;
296 }
297
298 error = jhead_scan(jd, &lh_1);
299 if (error)
300 return error;
301
302 *head = lh_1;
303
304 return error;
305}
306
307/**
185 * foreach_descriptor - go through the active part of the log 308 * foreach_descriptor - go through the active part of the log
186 * @jd: the journal 309 * @jd: the journal
187 * @start: the first log header in the active region 310 * @start: the first log header in the active region
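The restored gfs2_find_jhead() relies on the journal's layout: log-header sequence numbers rise from the start of the journal up to the head and then drop back where the log wraps. Comparing the probe at blk_1 with the midpoint therefore tells which half still contains the head, and jhead_scan() finishes with a short linear walk. A small user-space sketch of that bisection over an invented sequence array:

#include <stdio.h>

/* sequence numbers rise up to the head (index 4) and then wrap */
static const unsigned long long seq[] = { 90, 91, 92, 93, 94, 88, 89 };

int main(void)
{
	unsigned int blk_1 = 0, blk_2 = sizeof(seq) / sizeof(seq[0]) - 1, blk_m;

	for (;;) {
		blk_m = (blk_1 + blk_2) / 2;
		if (blk_1 == blk_m || blk_m == blk_2)
			break;
		if (seq[blk_1] <= seq[blk_m])
			blk_1 = blk_m;	/* head is at or beyond the midpoint */
		else
			blk_2 = blk_m;	/* the wrap is before the midpoint */
	}
	/* the equivalent of jhead_scan() would scan forward from blk_1 here */
	printf("candidate head block: %u (seq %llu)\n", blk_1, seq[blk_1]);
	return 0;
}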
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 99575ab81202..11d81248be85 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -27,6 +27,8 @@ extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
27extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where); 27extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
28extern void gfs2_revoke_clean(struct gfs2_jdesc *jd); 28extern void gfs2_revoke_clean(struct gfs2_jdesc *jd);
29 29
30extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
31 struct gfs2_log_header_host *head);
30extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); 32extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
31extern void gfs2_recover_func(struct work_struct *work); 33extern void gfs2_recover_func(struct work_struct *work);
32extern int __get_log_header(struct gfs2_sbd *sdp, 34extern int __get_log_header(struct gfs2_sbd *sdp,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index d4b11c903971..ca71163ff7cf 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -45,7 +45,6 @@
45#include "util.h" 45#include "util.h"
46#include "sys.h" 46#include "sys.h"
47#include "xattr.h" 47#include "xattr.h"
48#include "lops.h"
49 48
50#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x) 49#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
51 50
diff --git a/fs/inode.c b/fs/inode.c
index 0cd47fe0dbe5..73432e64f874 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
730 return LRU_REMOVED; 730 return LRU_REMOVED;
731 } 731 }
732 732
733 /* 733 /* recently referenced inodes get one more pass */
734 * Recently referenced inodes and inodes with many attached pages 734 if (inode->i_state & I_REFERENCED) {
735 * get one more pass.
736 */
737 if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
738 inode->i_state &= ~I_REFERENCED; 735 inode->i_state &= ~I_REFERENCED;
739 spin_unlock(&inode->i_lock); 736 spin_unlock(&inode->i_lock);
740 return LRU_ROTATE; 737 return LRU_ROTATE;
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 3f23b6840547..bf34ddaa2ad7 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -44,6 +44,7 @@
44#include <linux/keyctl.h> 44#include <linux/keyctl.h>
45#include <linux/key-type.h> 45#include <linux/key-type.h>
46#include <keys/user-type.h> 46#include <keys/user-type.h>
47#include <keys/request_key_auth-type.h>
47#include <linux/module.h> 48#include <linux/module.h>
48 49
49#include "internal.h" 50#include "internal.h"
@@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy;
59struct idmap_legacy_upcalldata { 60struct idmap_legacy_upcalldata {
60 struct rpc_pipe_msg pipe_msg; 61 struct rpc_pipe_msg pipe_msg;
61 struct idmap_msg idmap_msg; 62 struct idmap_msg idmap_msg;
62 struct key_construction *key_cons; 63 struct key *authkey;
63 struct idmap *idmap; 64 struct idmap *idmap;
64}; 65};
65 66
@@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = {
384 { Opt_find_err, NULL } 385 { Opt_find_err, NULL }
385}; 386};
386 387
387static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); 388static int nfs_idmap_legacy_upcall(struct key *, void *);
388static ssize_t idmap_pipe_downcall(struct file *, const char __user *, 389static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
389 size_t); 390 size_t);
390static void idmap_release_pipe(struct inode *); 391static void idmap_release_pipe(struct inode *);
@@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
549static void 550static void
550nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret) 551nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
551{ 552{
552 struct key_construction *cons = idmap->idmap_upcall_data->key_cons; 553 struct key *authkey = idmap->idmap_upcall_data->authkey;
553 554
554 kfree(idmap->idmap_upcall_data); 555 kfree(idmap->idmap_upcall_data);
555 idmap->idmap_upcall_data = NULL; 556 idmap->idmap_upcall_data = NULL;
556 complete_request_key(cons, ret); 557 complete_request_key(authkey, ret);
558 key_put(authkey);
557} 559}
558 560
559static void 561static void
@@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
563 nfs_idmap_complete_pipe_upcall_locked(idmap, ret); 565 nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
564} 566}
565 567
566static int nfs_idmap_legacy_upcall(struct key_construction *cons, 568static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
567 const char *op,
568 void *aux)
569{ 569{
570 struct idmap_legacy_upcalldata *data; 570 struct idmap_legacy_upcalldata *data;
571 struct request_key_auth *rka = get_request_key_auth(authkey);
571 struct rpc_pipe_msg *msg; 572 struct rpc_pipe_msg *msg;
572 struct idmap_msg *im; 573 struct idmap_msg *im;
573 struct idmap *idmap = (struct idmap *)aux; 574 struct idmap *idmap = (struct idmap *)aux;
574 struct key *key = cons->key; 575 struct key *key = rka->target_key;
575 int ret = -ENOKEY; 576 int ret = -ENOKEY;
576 577
577 if (!aux) 578 if (!aux)
@@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
586 msg = &data->pipe_msg; 587 msg = &data->pipe_msg;
587 im = &data->idmap_msg; 588 im = &data->idmap_msg;
588 data->idmap = idmap; 589 data->idmap = idmap;
589 data->key_cons = cons; 590 data->authkey = key_get(authkey);
590 591
591 ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); 592 ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
592 if (ret < 0) 593 if (ret < 0)
@@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
604out2: 605out2:
605 kfree(data); 606 kfree(data);
606out1: 607out1:
607 complete_request_key(cons, ret); 608 complete_request_key(authkey, ret);
608 return ret; 609 return ret;
609} 610}
610 611
@@ -651,9 +652,10 @@ out:
651static ssize_t 652static ssize_t
652idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) 653idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
653{ 654{
655 struct request_key_auth *rka;
654 struct rpc_inode *rpci = RPC_I(file_inode(filp)); 656 struct rpc_inode *rpci = RPC_I(file_inode(filp));
655 struct idmap *idmap = (struct idmap *)rpci->private; 657 struct idmap *idmap = (struct idmap *)rpci->private;
656 struct key_construction *cons; 658 struct key *authkey;
657 struct idmap_msg im; 659 struct idmap_msg im;
658 size_t namelen_in; 660 size_t namelen_in;
659 int ret = -ENOKEY; 661 int ret = -ENOKEY;
@@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
665 if (idmap->idmap_upcall_data == NULL) 667 if (idmap->idmap_upcall_data == NULL)
666 goto out_noupcall; 668 goto out_noupcall;
667 669
668 cons = idmap->idmap_upcall_data->key_cons; 670 authkey = idmap->idmap_upcall_data->authkey;
671 rka = get_request_key_auth(authkey);
669 672
670 if (mlen != sizeof(im)) { 673 if (mlen != sizeof(im)) {
671 ret = -ENOSPC; 674 ret = -ENOSPC;
@@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
690 693
691 ret = nfs_idmap_read_and_verify_message(&im, 694 ret = nfs_idmap_read_and_verify_message(&im,
692 &idmap->idmap_upcall_data->idmap_msg, 695 &idmap->idmap_upcall_data->idmap_msg,
693 cons->key, cons->authkey); 696 rka->target_key, authkey);
694 if (ret >= 0) { 697 if (ret >= 0) {
695 key_set_timeout(cons->key, nfs_idmap_cache_timeout); 698 key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
696 ret = mlen; 699 ret = mlen;
697 } 700 }
698 701
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index f12cb31a41e5..d09c9f878141 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -238,9 +238,9 @@ out:
238} 238}
239 239
240/* A writeback failed: mark the page as bad, and invalidate the page cache */ 240/* A writeback failed: mark the page as bad, and invalidate the page cache */
241static void nfs_set_pageerror(struct page *page) 241static void nfs_set_pageerror(struct address_space *mapping)
242{ 242{
243 nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); 243 nfs_zap_mapping(mapping->host, mapping);
244} 244}
245 245
246/* 246/*
@@ -994,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
994 nfs_list_remove_request(req); 994 nfs_list_remove_request(req);
995 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && 995 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
996 (hdr->good_bytes < bytes)) { 996 (hdr->good_bytes < bytes)) {
997 nfs_set_pageerror(req->wb_page); 997 nfs_set_pageerror(page_file_mapping(req->wb_page));
998 nfs_context_set_write_error(req->wb_context, hdr->error); 998 nfs_context_set_write_error(req->wb_context, hdr->error);
999 goto remove_req; 999 goto remove_req;
1000 } 1000 }
@@ -1348,7 +1348,8 @@ int nfs_updatepage(struct file *file, struct page *page,
1348 unsigned int offset, unsigned int count) 1348 unsigned int offset, unsigned int count)
1349{ 1349{
1350 struct nfs_open_context *ctx = nfs_file_open_context(file); 1350 struct nfs_open_context *ctx = nfs_file_open_context(file);
1351 struct inode *inode = page_file_mapping(page)->host; 1351 struct address_space *mapping = page_file_mapping(page);
1352 struct inode *inode = mapping->host;
1352 int status = 0; 1353 int status = 0;
1353 1354
1354 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); 1355 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -1366,7 +1367,7 @@ int nfs_updatepage(struct file *file, struct page *page,
1366 1367
1367 status = nfs_writepage_setup(ctx, page, offset, count); 1368 status = nfs_writepage_setup(ctx, page, offset, count);
1368 if (status < 0) 1369 if (status < 0)
1369 nfs_set_pageerror(page); 1370 nfs_set_pageerror(mapping);
1370 else 1371 else
1371 __set_page_dirty_nobuffers(page); 1372 __set_page_dirty_nobuffers(page);
1372out: 1373out:
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index b33f9785b756..72a7681f4046 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
1239 retval = nfsd_idmap_init(net); 1239 retval = nfsd_idmap_init(net);
1240 if (retval) 1240 if (retval)
1241 goto out_idmap_error; 1241 goto out_idmap_error;
1242 nn->nfsd4_lease = 45; /* default lease time */ 1242 nn->nfsd4_lease = 90; /* default lease time */
1243 nn->nfsd4_grace = 45; 1243 nn->nfsd4_grace = 90;
1244 nn->somebody_reclaimed = false; 1244 nn->somebody_reclaimed = false;
1245 nn->clverifier_counter = prandom_u32(); 1245 nn->clverifier_counter = prandom_u32();
1246 nn->clientid_counter = prandom_u32(); 1246 nn->clientid_counter = prandom_u32();
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 633a63462573..f5ed9512d193 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
1086 1086
1087 task_lock(p); 1087 task_lock(p);
1088 if (!p->vfork_done && process_shares_mm(p, mm)) { 1088 if (!p->vfork_done && process_shares_mm(p, mm)) {
1089 pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
1090 task_pid_nr(p), p->comm,
1091 p->signal->oom_score_adj, oom_adj,
1092 task_pid_nr(task), task->comm);
1093 p->signal->oom_score_adj = oom_adj; 1089 p->signal->oom_score_adj = oom_adj;
1094 if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) 1090 if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
1095 p->signal->oom_score_adj_min = (short)oom_adj; 1091 p->signal->oom_score_adj_min = (short)oom_adj;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f0ec9edab2f3..85b0ef890b28 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -423,7 +423,7 @@ struct mem_size_stats {
423}; 423};
424 424
425static void smaps_account(struct mem_size_stats *mss, struct page *page, 425static void smaps_account(struct mem_size_stats *mss, struct page *page,
426 bool compound, bool young, bool dirty) 426 bool compound, bool young, bool dirty, bool locked)
427{ 427{
428 int i, nr = compound ? 1 << compound_order(page) : 1; 428 int i, nr = compound ? 1 << compound_order(page) : 1;
429 unsigned long size = nr * PAGE_SIZE; 429 unsigned long size = nr * PAGE_SIZE;
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
450 else 450 else
451 mss->private_clean += size; 451 mss->private_clean += size;
452 mss->pss += (u64)size << PSS_SHIFT; 452 mss->pss += (u64)size << PSS_SHIFT;
453 if (locked)
454 mss->pss_locked += (u64)size << PSS_SHIFT;
453 return; 455 return;
454 } 456 }
455 457
456 for (i = 0; i < nr; i++, page++) { 458 for (i = 0; i < nr; i++, page++) {
457 int mapcount = page_mapcount(page); 459 int mapcount = page_mapcount(page);
460 unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
458 461
459 if (mapcount >= 2) { 462 if (mapcount >= 2) {
460 if (dirty || PageDirty(page)) 463 if (dirty || PageDirty(page))
461 mss->shared_dirty += PAGE_SIZE; 464 mss->shared_dirty += PAGE_SIZE;
462 else 465 else
463 mss->shared_clean += PAGE_SIZE; 466 mss->shared_clean += PAGE_SIZE;
464 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; 467 mss->pss += pss / mapcount;
468 if (locked)
469 mss->pss_locked += pss / mapcount;
465 } else { 470 } else {
466 if (dirty || PageDirty(page)) 471 if (dirty || PageDirty(page))
467 mss->private_dirty += PAGE_SIZE; 472 mss->private_dirty += PAGE_SIZE;
468 else 473 else
469 mss->private_clean += PAGE_SIZE; 474 mss->private_clean += PAGE_SIZE;
470 mss->pss += PAGE_SIZE << PSS_SHIFT; 475 mss->pss += pss;
476 if (locked)
477 mss->pss_locked += pss;
471 } 478 }
472 } 479 }
473} 480}
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
490{ 497{
491 struct mem_size_stats *mss = walk->private; 498 struct mem_size_stats *mss = walk->private;
492 struct vm_area_struct *vma = walk->vma; 499 struct vm_area_struct *vma = walk->vma;
500 bool locked = !!(vma->vm_flags & VM_LOCKED);
493 struct page *page = NULL; 501 struct page *page = NULL;
494 502
495 if (pte_present(*pte)) { 503 if (pte_present(*pte)) {
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
532 if (!page) 540 if (!page)
533 return; 541 return;
534 542
535 smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte)); 543 smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
536} 544}
537 545
538#ifdef CONFIG_TRANSPARENT_HUGEPAGE 546#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
541{ 549{
542 struct mem_size_stats *mss = walk->private; 550 struct mem_size_stats *mss = walk->private;
543 struct vm_area_struct *vma = walk->vma; 551 struct vm_area_struct *vma = walk->vma;
552 bool locked = !!(vma->vm_flags & VM_LOCKED);
544 struct page *page; 553 struct page *page;
545 554
546 /* FOLL_DUMP will return -EFAULT on huge zero page */ 555 /* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
555 /* pass */; 564 /* pass */;
556 else 565 else
557 VM_BUG_ON_PAGE(1, page); 566 VM_BUG_ON_PAGE(1, page);
558 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); 567 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
559} 568}
560#else 569#else
561static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, 570static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
737 } 746 }
738 } 747 }
739#endif 748#endif
740
741 /* mmap_sem is held in m_start */ 749 /* mmap_sem is held in m_start */
742 walk_page_vma(vma, &smaps_walk); 750 walk_page_vma(vma, &smaps_walk);
743 if (vma->vm_flags & VM_LOCKED)
744 mss->pss_locked += mss->pss;
745} 751}
746 752
747#define SEQ_PUT_DEC(str, val) \ 753#define SEQ_PUT_DEC(str, val) \
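With the change above, locked memory is accounted page by page inside smaps_account(), using the same proportional share as Pss (the page size divided by its map count), instead of re-adding the accumulated pss of every VM_LOCKED VMA. A small arithmetic sketch with invented numbers:

#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PSS_SHIFT	12

int main(void)
{
	unsigned long long pss_locked = 0;

	/* one locked page shared by two tasks: each task is charged half */
	pss_locked += (PAGE_SIZE << PSS_SHIFT) / 2;
	/* one locked private page: counted in full */
	pss_locked += PAGE_SIZE << PSS_SHIFT;

	/* prints: pss_locked: 6 kB  (4 kB private + 2 kB shared half) */
	printf("pss_locked: %llu kB\n", (pss_locked >> PSS_SHIFT) / 1024);
	return 0;
}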
diff --git a/include/uapi/asm-generic/shmparam.h b/include/asm-generic/shmparam.h
index 8b78c0ba08b1..8b78c0ba08b1 100644
--- a/include/uapi/asm-generic/shmparam.h
+++ b/include/asm-generic/shmparam.h
diff --git a/include/dt-bindings/clock/axg-aoclkc.h b/include/dt-bindings/clock/axg-aoclkc.h
index 61955016a55b..8ec4a269c7a6 100644
--- a/include/dt-bindings/clock/axg-aoclkc.h
+++ b/include/dt-bindings/clock/axg-aoclkc.h
@@ -21,6 +21,11 @@
21#define CLKID_AO_SAR_ADC_SEL 8 21#define CLKID_AO_SAR_ADC_SEL 8
22#define CLKID_AO_SAR_ADC_DIV 9 22#define CLKID_AO_SAR_ADC_DIV 9
23#define CLKID_AO_SAR_ADC_CLK 10 23#define CLKID_AO_SAR_ADC_CLK 10
24#define CLKID_AO_ALT_XTAL 11 24#define CLKID_AO_CTS_OSCIN 11
25#define CLKID_AO_32K_PRE 12
26#define CLKID_AO_32K_DIV 13
27#define CLKID_AO_32K_SEL 14
28#define CLKID_AO_32K 15
29#define CLKID_AO_CTS_RTC_OSCIN 16
25 30
26#endif 31#endif
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 98bd85ce1e45..25ffa53573a5 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -156,7 +156,7 @@
156#define CLK_ACLK_G2D_266 220 156#define CLK_ACLK_G2D_266 220
157#define CLK_ACLK_G2D_400 221 157#define CLK_ACLK_G2D_400 221
158#define CLK_ACLK_G3D_400 222 158#define CLK_ACLK_G3D_400 222
159#define CLK_ACLK_IMEM_SSX_266 223 159#define CLK_ACLK_IMEM_SSSX_266 223
160#define CLK_ACLK_BUS0_400 224 160#define CLK_ACLK_BUS0_400 224
161#define CLK_ACLK_BUS1_400 225 161#define CLK_ACLK_BUS1_400 225
162#define CLK_ACLK_IMEM_200 226 162#define CLK_ACLK_IMEM_200 226
@@ -1406,4 +1406,10 @@
1406 1406
1407#define CAM1_NR_CLK 113 1407#define CAM1_NR_CLK 113
1408 1408
1409/* CMU_IMEM */
1410#define CLK_ACLK_SLIMSSS 2
1411#define CLK_PCLK_SLIMSSS 35
1412
1413#define IMEM_NR_CLK 36
1414
1409#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */ 1415#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/g12a-aoclkc.h b/include/dt-bindings/clock/g12a-aoclkc.h
new file mode 100644
index 000000000000..8db01ffbeb06
--- /dev/null
+++ b/include/dt-bindings/clock/g12a-aoclkc.h
@@ -0,0 +1,34 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Copyright (c) 2016 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 *
6 * Copyright (c) 2018 Amlogic, inc.
7 * Author: Qiufang Dai <qiufang.dai@amlogic.com>
8 */
9
10#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_G12A_AOCLK
11#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_G12A_AOCLK
12
13#define CLKID_AO_AHB 0
14#define CLKID_AO_IR_IN 1
15#define CLKID_AO_I2C_M0 2
16#define CLKID_AO_I2C_S0 3
17#define CLKID_AO_UART 4
18#define CLKID_AO_PROD_I2C 5
19#define CLKID_AO_UART2 6
20#define CLKID_AO_IR_OUT 7
21#define CLKID_AO_SAR_ADC 8
22#define CLKID_AO_MAILBOX 9
23#define CLKID_AO_M3 10
24#define CLKID_AO_AHB_SRAM 11
25#define CLKID_AO_RTI 12
26#define CLKID_AO_M4_FCLK 13
27#define CLKID_AO_M4_HCLK 14
28#define CLKID_AO_CLK81 15
29#define CLKID_AO_SAR_ADC_CLK 18
30#define CLKID_AO_32K 23
31#define CLKID_AO_CEC 27
32#define CLKID_AO_CTS_RTC_OSCIN 28
33
34#endif
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
new file mode 100644
index 000000000000..83b657038d1e
--- /dev/null
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -0,0 +1,135 @@
1/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
2/*
3 * Meson-G12A clock tree IDs
4 *
5 * Copyright (c) 2018 Amlogic, Inc. All rights reserved.
6 */
7
8#ifndef __G12A_CLKC_H
9#define __G12A_CLKC_H
10
11#define CLKID_SYS_PLL 0
12#define CLKID_FIXED_PLL 1
13#define CLKID_FCLK_DIV2 2
14#define CLKID_FCLK_DIV3 3
15#define CLKID_FCLK_DIV4 4
16#define CLKID_FCLK_DIV5 5
17#define CLKID_FCLK_DIV7 6
18#define CLKID_GP0_PLL 7
19#define CLKID_CLK81 10
20#define CLKID_MPLL0 11
21#define CLKID_MPLL1 12
22#define CLKID_MPLL2 13
23#define CLKID_MPLL3 14
24#define CLKID_DDR 15
25#define CLKID_DOS 16
26#define CLKID_AUDIO_LOCKER 17
27#define CLKID_MIPI_DSI_HOST 18
28#define CLKID_ETH_PHY 19
29#define CLKID_ISA 20
30#define CLKID_PL301 21
31#define CLKID_PERIPHS 22
32#define CLKID_SPICC0 23
33#define CLKID_I2C 24
34#define CLKID_SANA 25
35#define CLKID_SD 26
36#define CLKID_RNG0 27
37#define CLKID_UART0 28
38#define CLKID_SPICC1 29
39#define CLKID_HIU_IFACE 30
40#define CLKID_MIPI_DSI_PHY 31
41#define CLKID_ASSIST_MISC 32
42#define CLKID_SD_EMMC_A 33
43#define CLKID_SD_EMMC_B 34
44#define CLKID_SD_EMMC_C 35
45#define CLKID_AUDIO_CODEC 36
46#define CLKID_AUDIO 37
47#define CLKID_ETH 38
48#define CLKID_DEMUX 39
49#define CLKID_AUDIO_IFIFO 40
50#define CLKID_ADC 41
51#define CLKID_UART1 42
52#define CLKID_G2D 43
53#define CLKID_RESET 44
54#define CLKID_PCIE_COMB 45
55#define CLKID_PARSER 46
56#define CLKID_USB 47
57#define CLKID_PCIE_PHY 48
58#define CLKID_AHB_ARB0 49
59#define CLKID_AHB_DATA_BUS 50
60#define CLKID_AHB_CTRL_BUS 51
61#define CLKID_HTX_HDCP22 52
62#define CLKID_HTX_PCLK 53
63#define CLKID_BT656 54
64#define CLKID_USB1_DDR_BRIDGE 55
65#define CLKID_MMC_PCLK 56
66#define CLKID_UART2 57
67#define CLKID_VPU_INTR 58
68#define CLKID_GIC 59
69#define CLKID_SD_EMMC_A_CLK0 60
70#define CLKID_SD_EMMC_B_CLK0 61
71#define CLKID_SD_EMMC_C_CLK0 62
72#define CLKID_HIFI_PLL 74
73#define CLKID_VCLK2_VENCI0 80
74#define CLKID_VCLK2_VENCI1 81
75#define CLKID_VCLK2_VENCP0 82
76#define CLKID_VCLK2_VENCP1 83
77#define CLKID_VCLK2_VENCT0 84
78#define CLKID_VCLK2_VENCT1 85
79#define CLKID_VCLK2_OTHER 86
80#define CLKID_VCLK2_ENCI 87
81#define CLKID_VCLK2_ENCP 88
82#define CLKID_DAC_CLK 89
83#define CLKID_AOCLK 90
84#define CLKID_IEC958 91
85#define CLKID_ENC480P 92
86#define CLKID_RNG1 93
87#define CLKID_VCLK2_ENCT 94
88#define CLKID_VCLK2_ENCL 95
89#define CLKID_VCLK2_VENCLMMC 96
90#define CLKID_VCLK2_VENCL 97
91#define CLKID_VCLK2_OTHER1 98
92#define CLKID_FCLK_DIV2P5 99
93#define CLKID_DMA 105
94#define CLKID_EFUSE 106
95#define CLKID_ROM_BOOT 107
96#define CLKID_RESET_SEC 108
97#define CLKID_SEC_AHB_APB3 109
98#define CLKID_VPU_0_SEL 110
99#define CLKID_VPU_0 112
100#define CLKID_VPU_1_SEL 113
101#define CLKID_VPU_1 115
102#define CLKID_VPU 116
103#define CLKID_VAPB_0_SEL 117
104#define CLKID_VAPB_0 119
105#define CLKID_VAPB_1_SEL 120
106#define CLKID_VAPB_1 122
107#define CLKID_VAPB_SEL 123
108#define CLKID_VAPB 124
109#define CLKID_HDMI_PLL 128
110#define CLKID_VID_PLL 129
111#define CLKID_VCLK 138
112#define CLKID_VCLK2 139
113#define CLKID_VCLK_DIV1 148
114#define CLKID_VCLK_DIV2 149
115#define CLKID_VCLK_DIV4 150
116#define CLKID_VCLK_DIV6 151
117#define CLKID_VCLK_DIV12 152
118#define CLKID_VCLK2_DIV1 153
119#define CLKID_VCLK2_DIV2 154
120#define CLKID_VCLK2_DIV4 155
121#define CLKID_VCLK2_DIV6 156
122#define CLKID_VCLK2_DIV12 157
123#define CLKID_CTS_ENCI 162
124#define CLKID_CTS_ENCP 163
125#define CLKID_CTS_VDAC 164
126#define CLKID_HDMI_TX 165
127#define CLKID_HDMI 168
128#define CLKID_MALI_0_SEL 169
129#define CLKID_MALI_0 171
130#define CLKID_MALI_1_SEL 172
131#define CLKID_MALI_1 174
132#define CLKID_MALI 175
133#define CLKID_MPLL_5OM 177
134
135#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/gxbb-aoclkc.h b/include/dt-bindings/clock/gxbb-aoclkc.h
index 9d15e2221fdb..ec3b26319fc4 100644
--- a/include/dt-bindings/clock/gxbb-aoclkc.h
+++ b/include/dt-bindings/clock/gxbb-aoclkc.h
@@ -63,5 +63,12 @@
63#define CLKID_AO_UART2 4 63#define CLKID_AO_UART2 4
64#define CLKID_AO_IR_BLASTER 5 64#define CLKID_AO_IR_BLASTER 5
65#define CLKID_AO_CEC_32K 6 65#define CLKID_AO_CEC_32K 6
66#define CLKID_AO_CTS_OSCIN 7
67#define CLKID_AO_32K_PRE 8
68#define CLKID_AO_32K_DIV 9
69#define CLKID_AO_32K_SEL 10
70#define CLKID_AO_32K 11
71#define CLKID_AO_CTS_RTC_OSCIN 12
72#define CLKID_AO_CLK81 13
66 73
67#endif 74#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 228a5e234af0..e785c6eb3561 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -71,6 +71,7 @@
71#define MMP2_CLK_CCIC1_MIX 117 71#define MMP2_CLK_CCIC1_MIX 117
72#define MMP2_CLK_CCIC1_PHY 118 72#define MMP2_CLK_CCIC1_PHY 118
73#define MMP2_CLK_CCIC1_SPHY 119 73#define MMP2_CLK_CCIC1_SPHY 119
74#define MMP2_CLK_DISP0_LCDC 120
74 75
75#define MMP2_NR_CLKS 200 76#define MMP2_NR_CLKS 200
76#endif 77#endif
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 5fe2923382d0..8067077a62ca 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -104,6 +104,7 @@
104#define CLKID_MPLL2 95 104#define CLKID_MPLL2 95
105#define CLKID_NAND_CLK 112 105#define CLKID_NAND_CLK 112
106#define CLKID_ABP 124 106#define CLKID_ABP 124
107#define CLKID_APB 124
107#define CLKID_PERIPH 126 108#define CLKID_PERIPH 126
108#define CLKID_AXI 128 109#define CLKID_AXI 128
109#define CLKID_L2_DRAM 130 110#define CLKID_L2_DRAM 130
diff --git a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
index 9bc5d45ff4b5..e355363f40c2 100644
--- a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
@@ -54,5 +54,6 @@
54#define R8A774A1_CLK_CPEX 43 54#define R8A774A1_CLK_CPEX 43
55#define R8A774A1_CLK_R 44 55#define R8A774A1_CLK_R 44
56#define R8A774A1_CLK_OSC 45 56#define R8A774A1_CLK_OSC 45
57#define R8A774A1_CLK_CANFD 46
57 58
58#endif /* __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ */ 59#endif /* __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
index 8fe51b6aca28..8ad9cd6be8e9 100644
--- a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
@@ -56,5 +56,6 @@
56#define R8A774C0_CLK_CSI0 45 56#define R8A774C0_CLK_CSI0 45
57#define R8A774C0_CLK_CP 46 57#define R8A774C0_CLK_CP 46
58#define R8A774C0_CLK_CPEX 47 58#define R8A774C0_CLK_CPEX 47
59#define R8A774C0_CLK_CANFD 48
59 60
60#endif /* __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ */ 61#endif /* __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/reset/g12a-aoclkc.h b/include/dt-bindings/reset/g12a-aoclkc.h
new file mode 100644
index 000000000000..bd2e2337135c
--- /dev/null
+++ b/include/dt-bindings/reset/g12a-aoclkc.h
@@ -0,0 +1,18 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Copyright (c) 2016 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 */
6
7#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK
8#define DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK
9
10#define RESET_AO_IR_IN 0
11#define RESET_AO_UART 1
12#define RESET_AO_I2C_M 2
13#define RESET_AO_I2C_S 3
14#define RESET_AO_SAR_ADC 4
15#define RESET_AO_UART2 5
16#define RESET_AO_IR_OUT 6
17
18#endif
diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h
new file mode 100644
index 000000000000..a726dd3f1dc6
--- /dev/null
+++ b/include/keys/request_key_auth-type.h
@@ -0,0 +1,36 @@
1/* request_key authorisation token key type
2 *
3 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H
13#define _KEYS_REQUEST_KEY_AUTH_TYPE_H
14
15#include <linux/key.h>
16
17/*
18 * Authorisation record for request_key().
19 */
20struct request_key_auth {
21 struct key *target_key;
22 struct key *dest_keyring;
23 const struct cred *cred;
24 void *callout_info;
25 size_t callout_len;
26 pid_t pid;
27 char op[8];
28} __randomize_layout;
29
30static inline struct request_key_auth *get_request_key_auth(const struct key *key)
31{
32 return key->payload.data[0];
33}
34
35
36#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index e098cbe27db5..12babe991594 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -31,7 +31,7 @@
31struct user_key_payload { 31struct user_key_payload {
32 struct rcu_head rcu; /* RCU destructor */ 32 struct rcu_head rcu; /* RCU destructor */
33 unsigned short datalen; /* length of this data */ 33 unsigned short datalen; /* length of this data */
34 char data[0]; /* actual data */ 34 char data[0] __aligned(__alignof__(u64)); /* actual data */
35}; 35};
36 36
37extern struct key_type key_type_user; 37extern struct key_type key_type_user;
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4f31f96bbfab..c36c86f1ec9a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -100,7 +100,7 @@ enum vgic_irq_config {
100}; 100};
101 101
102struct vgic_irq { 102struct vgic_irq {
103 spinlock_t irq_lock; /* Protects the content of the struct */ 103 raw_spinlock_t irq_lock; /* Protects the content of the struct */
104 struct list_head lpi_list; /* Used to link all LPIs together */ 104 struct list_head lpi_list; /* Used to link all LPIs together */
105 struct list_head ap_list; 105 struct list_head ap_list;
106 106
@@ -256,7 +256,7 @@ struct vgic_dist {
256 u64 propbaser; 256 u64 propbaser;
257 257
258 /* Protects the lpi_list and the count value below. */ 258 /* Protects the lpi_list and the count value below. */
259 spinlock_t lpi_list_lock; 259 raw_spinlock_t lpi_list_lock;
260 struct list_head lpi_list_head; 260 struct list_head lpi_list_head;
261 int lpi_list_count; 261 int lpi_list_count;
262 262
@@ -307,7 +307,7 @@ struct vgic_cpu {
307 unsigned int used_lrs; 307 unsigned int used_lrs;
308 struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; 308 struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
309 309
310 spinlock_t ap_list_lock; /* Protects the ap_list */ 310 raw_spinlock_t ap_list_lock; /* Protects the ap_list */
311 311
312 /* 312 /*
313 * List of IRQs that this VCPU should consider because they are either 313 * List of IRQs that this VCPU should consider because they are either
diff --git a/include/linux/clk.h b/include/linux/clk.h
index a7773b5c0b9f..d8bc1a856b39 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -384,6 +384,17 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
384struct clk *devm_clk_get(struct device *dev, const char *id); 384struct clk *devm_clk_get(struct device *dev, const char *id);
385 385
386/** 386/**
387 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
388 * clock producer.
389 * @dev: device for clock "consumer"
390 * @id: clock consumer ID
391 *
392 * Behaves the same as devm_clk_get() except where there is no clock producer.
393 * In this case, instead of returning -ENOENT, the function returns NULL.
394 */
395struct clk *devm_clk_get_optional(struct device *dev, const char *id);
396
397/**
387 * devm_get_clk_from_child - lookup and obtain a managed reference to a 398 * devm_get_clk_from_child - lookup and obtain a managed reference to a
388 * clock producer from child node. 399 * clock producer from child node.
389 * @dev: device for clock "consumer" 400 * @dev: device for clock "consumer"
@@ -718,6 +729,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
718 return NULL; 729 return NULL;
719} 730}
720 731
732static inline struct clk *devm_clk_get_optional(struct device *dev,
733 const char *id)
734{
735 return NULL;
736}
737
721static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, 738static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
722 struct clk_bulk_data *clks) 739 struct clk_bulk_data *clks)
723{ 740{
@@ -862,6 +879,25 @@ static inline void clk_bulk_disable_unprepare(int num_clks,
862 clk_bulk_unprepare(num_clks, clks); 879 clk_bulk_unprepare(num_clks, clks);
863} 880}
864 881
882/**
883 * clk_get_optional - lookup and obtain a reference to an optional clock
884 * producer.
885 * @dev: device for clock "consumer"
886 * @id: clock consumer ID
887 *
888 * Behaves the same as clk_get() except where there is no clock producer. In
889 * this case, instead of returning -ENOENT, the function returns NULL.
890 */
891static inline struct clk *clk_get_optional(struct device *dev, const char *id)
892{
893 struct clk *clk = clk_get(dev, id);
894
895 if (clk == ERR_PTR(-ENOENT))
896 return NULL;
897
898 return clk;
899}
900
865#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) 901#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
866struct clk *of_clk_get(struct device_node *np, int index); 902struct clk *of_clk_get(struct device_node *np, int index);
867struct clk *of_clk_get_by_name(struct device_node *np, const char *name); 903struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
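The kernel-doc added above spells out the contract: when the clock is simply not described, the *_optional() variants return NULL instead of ERR_PTR(-ENOENT), while real errors still come back as error pointers. A minimal consumer sketch (foo_probe() and the "bus" clock name are hypothetical, not part of this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* NULL when the "bus" clock is absent; genuine errors still propagate. */
	clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* The clk API treats a NULL clk as a no-op, so no NULL check is needed. */
	return clk_prepare_enable(clk);
}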
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index eacc5df57b99..78872efc7be0 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -160,6 +160,7 @@ struct clk_hw_omap {
160 struct clockdomain *clkdm; 160 struct clockdomain *clkdm;
161 const struct clk_hw_omap_ops *ops; 161 const struct clk_hw_omap_ops *ops;
162 u32 context; 162 u32 context;
163 int autoidle_count;
163}; 164};
164 165
165/* 166/*
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 4890ff033220..ccb32af5848b 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -52,4 +52,8 @@ int clk_add_alias(const char *, const char *, const char *, struct device *);
52int clk_register_clkdev(struct clk *, const char *, const char *); 52int clk_register_clkdev(struct clk *, const char *, const char *);
53int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); 53int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
54 54
55int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
56 const char *con_id, const char *dev_id);
57void devm_clk_release_clkdev(struct device *dev, const char *con_id,
58 const char *dev_id);
55#endif 59#endif
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 19f32b0c29af..6b318efd8a74 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -34,6 +34,7 @@
34#ifndef __has_attribute 34#ifndef __has_attribute
35# define __has_attribute(x) __GCC4_has_attribute_##x 35# define __has_attribute(x) __GCC4_has_attribute_##x
36# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) 36# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
37# define __GCC4_has_attribute___copy__ 0
37# define __GCC4_has_attribute___designated_init__ 0 38# define __GCC4_has_attribute___designated_init__ 0
38# define __GCC4_has_attribute___externally_visible__ 1 39# define __GCC4_has_attribute___externally_visible__ 1
39# define __GCC4_has_attribute___noclone__ 1 40# define __GCC4_has_attribute___noclone__ 1
@@ -101,6 +102,19 @@
101#define __attribute_const__ __attribute__((__const__)) 102#define __attribute_const__ __attribute__((__const__))
102 103
103/* 104/*
105 * Optional: only supported since gcc >= 9
106 * Optional: not supported by clang
107 * Optional: not supported by icc
108 *
109 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
110 */
111#if __has_attribute(__copy__)
112# define __copy(symbol) __attribute__((__copy__(symbol)))
113#else
114# define __copy(symbol)
115#endif
116
117/*
104 * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' 118 * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
105 * attribute warnings entirely and for good") for more information. 119 * attribute warnings entirely and for good") for more information.
106 * 120 *
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 45ff763fba76..28604a8d0aa9 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1198,8 +1198,6 @@ static inline bool efi_enabled(int feature)
1198extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); 1198extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
1199 1199
1200extern bool efi_is_table_address(unsigned long phys_addr); 1200extern bool efi_is_table_address(unsigned long phys_addr);
1201
1202extern int efi_apply_persistent_mem_reservations(void);
1203#else 1201#else
1204static inline bool efi_enabled(int feature) 1202static inline bool efi_enabled(int feature)
1205{ 1203{
@@ -1218,11 +1216,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
1218{ 1216{
1219 return false; 1217 return false;
1220} 1218}
1221
1222static inline int efi_apply_persistent_mem_reservations(void)
1223{
1224 return 0;
1225}
1226#endif 1219#endif
1227 1220
1228extern int efi_status_to_err(efi_status_t status); 1221extern int efi_status_to_err(efi_status_t status);
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index bc9af551fc83..e49d1de0614e 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -21,15 +21,6 @@ struct kernel_pkey_query;
21struct kernel_pkey_params; 21struct kernel_pkey_params;
22 22
23/* 23/*
24 * key under-construction record
25 * - passed to the request_key actor if supplied
26 */
27struct key_construction {
28 struct key *key; /* key being constructed */
29 struct key *authkey;/* authorisation for key being constructed */
30};
31
32/*
33 * Pre-parsed payload, used by key add, update and instantiate. 24 * Pre-parsed payload, used by key add, update and instantiate.
34 * 25 *
35 * This struct will be cleared and data and datalen will be set with the data 26 * This struct will be cleared and data and datalen will be set with the data
@@ -50,8 +41,7 @@ struct key_preparsed_payload {
50 time64_t expiry; /* Expiry time of key */ 41 time64_t expiry; /* Expiry time of key */
51} __randomize_layout; 42} __randomize_layout;
52 43
53typedef int (*request_key_actor_t)(struct key_construction *key, 44typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
54 const char *op, void *aux);
55 45
56/* 46/*
57 * Preparsed matching criterion. 47 * Preparsed matching criterion.
@@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key,
181 const void *data, 171 const void *data,
182 size_t datalen, 172 size_t datalen,
183 struct key *keyring, 173 struct key *keyring,
184 struct key *instkey); 174 struct key *authkey);
185extern int key_reject_and_link(struct key *key, 175extern int key_reject_and_link(struct key *key,
186 unsigned timeout, 176 unsigned timeout,
187 unsigned error, 177 unsigned error,
188 struct key *keyring, 178 struct key *keyring,
189 struct key *instkey); 179 struct key *authkey);
190extern void complete_request_key(struct key_construction *cons, int error); 180extern void complete_request_key(struct key *authkey, int error);
191 181
192static inline int key_negate_and_link(struct key *key, 182static inline int key_negate_and_link(struct key *key,
193 unsigned timeout, 183 unsigned timeout,
194 struct key *keyring, 184 struct key *keyring,
195 struct key *instkey) 185 struct key *authkey)
196{ 186{
197 return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); 187 return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
198} 188}
199 189
200extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); 190extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 64c41cf45590..859b55b66db2 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -29,9 +29,6 @@ extern unsigned long max_pfn;
29 */ 29 */
30extern unsigned long long max_possible_pfn; 30extern unsigned long long max_possible_pfn;
31 31
32#define INIT_MEMBLOCK_REGIONS 128
33#define INIT_PHYSMEM_REGIONS 4
34
35/** 32/**
36 * enum memblock_flags - definition of memory region attributes 33 * enum memblock_flags - definition of memory region attributes
37 * @MEMBLOCK_NONE: no special request 34 * @MEMBLOCK_NONE: no special request
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index de7377815b6b..8ef330027b13 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -308,6 +308,7 @@ struct mmc_card {
308 unsigned int nr_parts; 308 unsigned int nr_parts;
309 309
310 unsigned int bouncesz; /* Bounce buffer size */ 310 unsigned int bouncesz; /* Bounce buffer size */
311 struct workqueue_struct *complete_wq; /* Private workqueue */
311}; 312};
312 313
313static inline bool mmc_large_sector(struct mmc_card *card) 314static inline bool mmc_large_sector(struct mmc_card *card)
diff --git a/include/linux/module.h b/include/linux/module.h
index 8fa38d3e7538..f5bc4c046461 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -129,13 +129,13 @@ extern void cleanup_module(void);
129#define module_init(initfn) \ 129#define module_init(initfn) \
130 static inline initcall_t __maybe_unused __inittest(void) \ 130 static inline initcall_t __maybe_unused __inittest(void) \
131 { return initfn; } \ 131 { return initfn; } \
132 int init_module(void) __attribute__((alias(#initfn))); 132 int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
133 133
134/* This is only required if you want to be unloadable. */ 134/* This is only required if you want to be unloadable. */
135#define module_exit(exitfn) \ 135#define module_exit(exitfn) \
136 static inline exitcall_t __maybe_unused __exittest(void) \ 136 static inline exitcall_t __maybe_unused __exittest(void) \
137 { return exitfn; } \ 137 { return exitfn; } \
138 void cleanup_module(void) __attribute__((alias(#exitfn))); 138 void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
139 139
140#endif 140#endif
141 141
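With __copy(initfn)/__copy(exitfn) in place, attributes on the real init/exit functions are propagated to the init_module()/cleanup_module() aliases that these macros create, which is what keeps gcc 9's -Wmissing-attributes warning quiet. A throwaway module sketch (the foo_* names are illustrative):

#include <linux/init.h>
#include <linux/module.h>

static int __init foo_init(void)
{
	return 0;	/* __init is now mirrored onto the init_module alias */
}

static void __exit foo_exit(void)
{
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");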
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 2b2a6dce1630..4c76fe2c8488 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -11,6 +11,8 @@
11#define _LINUX_NETDEV_FEATURES_H 11#define _LINUX_NETDEV_FEATURES_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/bitops.h>
15#include <asm/byteorder.h>
14 16
15typedef u64 netdev_features_t; 17typedef u64 netdev_features_t;
16 18
@@ -154,8 +156,26 @@ enum {
154#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) 156#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
155#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) 157#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
156 158
157#define for_each_netdev_feature(mask_addr, bit) \ 159/* Finds the next feature with the highest number of the range of start till 0.
158 for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) 160 */
161static inline int find_next_netdev_feature(u64 feature, unsigned long start)
162{
163 /* like BITMAP_LAST_WORD_MASK() for u64
164 * this sets the most significant 64 - start to 0.
165 */
166 feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
167
168 return fls64(feature) - 1;
169}
170
171/* This goes for the MSB to the LSB through the set feature bits,
172 * mask_addr should be a u64 and bit an int
173 */
174#define for_each_netdev_feature(mask_addr, bit) \
175 for ((bit) = find_next_netdev_feature((mask_addr), \
176 NETDEV_FEATURE_COUNT); \
177 (bit) >= 0; \
178 (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
159 179
160/* Features valid for ethtool to change */ 180/* Features valid for ethtool to change */
161/* = all defined minus driver/device-class-related */ 181/* = all defined minus driver/device-class-related */
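The rewritten for_each_netdev_feature() now iterates from the most-significant set bit down to bit 0 and takes the u64 mask by value rather than through a pointer cast. A small sketch of the intended use (foo_walk_features() and the debug print are illustrative only):

#include <linux/netdev_features.h>
#include <linux/printk.h>

static void foo_walk_features(netdev_features_t changed)
{
	int bit;

	/* Visit each set feature bit, highest-numbered first. */
	for_each_netdev_feature(changed, bit)
		pr_debug("feature bit %d (mask %llu) is set\n",
			 bit, (unsigned long long)__NETIF_F_BIT(bit));
}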
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1d5c551a5add..e1a051724f7e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -447,6 +447,11 @@ struct pmu {
447 * Filter events for PMU-specific reasons. 447 * Filter events for PMU-specific reasons.
448 */ 448 */
449 int (*filter_match) (struct perf_event *event); /* optional */ 449 int (*filter_match) (struct perf_event *event); /* optional */
450
451 /*
452 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
453 */
454 int (*check_period) (struct perf_event *event, u64 value); /* optional */
450}; 455};
451 456
452enum perf_addr_filter_action_t { 457enum perf_addr_filter_action_t {
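The new, optional ->check_period() callback lets a PMU veto period updates coming in through PERF_EVENT_IOC_PERIOD; the kernel/events/core.c hunk further below wires in a nop that accepts everything when a driver does not provide one. A hypothetical driver-side sketch (foo_pmu and the 128-count limit are invented for illustration):

#include <linux/perf_event.h>

/* Hypothetical hardware limit: this PMU cannot count periods below 128. */
static int foo_pmu_check_period(struct perf_event *event, u64 value)
{
	return value < 128 ? -EINVAL : 0;
}

static struct pmu foo_pmu = {
	.check_period	= foo_pmu_check_period,
	/* remaining mandatory callbacks omitted from this sketch */
};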
diff --git a/include/linux/phy.h b/include/linux/phy.h
index ef20aeea10cc..333b56d8f746 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -674,26 +674,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
674size_t phy_speeds(unsigned int *speeds, size_t size, 674size_t phy_speeds(unsigned int *speeds, size_t size,
675 unsigned long *mask); 675 unsigned long *mask);
676 676
677static inline bool __phy_is_started(struct phy_device *phydev)
678{
679 WARN_ON(!mutex_is_locked(&phydev->lock));
680
681 return phydev->state >= PHY_UP;
682}
683
684/** 677/**
685 * phy_is_started - Convenience function to check whether PHY is started 678 * phy_is_started - Convenience function to check whether PHY is started
686 * @phydev: The phy_device struct 679 * @phydev: The phy_device struct
687 */ 680 */
688static inline bool phy_is_started(struct phy_device *phydev) 681static inline bool phy_is_started(struct phy_device *phydev)
689{ 682{
690 bool started; 683 return phydev->state >= PHY_UP;
691
692 mutex_lock(&phydev->lock);
693 started = __phy_is_started(phydev);
694 mutex_unlock(&phydev->lock);
695
696 return started;
697} 684}
698 685
699void phy_resolve_aneg_linkmode(struct phy_device *phydev); 686void phy_resolve_aneg_linkmode(struct phy_device *phydev);
@@ -1005,6 +992,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
1005{ 992{
1006 return 0; 993 return 0;
1007} 994}
995static inline int genphy_no_ack_interrupt(struct phy_device *phydev)
996{
997 return 0;
998}
999static inline int genphy_no_config_intr(struct phy_device *phydev)
1000{
1001 return 0;
1002}
1008int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, 1003int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
1009 u16 regnum); 1004 u16 regnum);
1010int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, 1005int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
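Besides simplifying phy_is_started() so it no longer takes the phydev lock itself, this hunk adds two genphy_no_*() stubs that interrupt-less PHY drivers can plug into the interrupt callbacks. A hypothetical driver entry using them (IDs and name are made up):

#include <linux/phy.h>

static struct phy_driver foo_phy_driver = {
	.phy_id		= 0x001cc800,	/* illustrative PHY ID */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Foo Gigabit PHY",
	/* no interrupt support: acknowledge/configure become no-ops */
	.ack_interrupt	= genphy_no_ack_interrupt,
	.config_intr	= genphy_no_config_intr,
};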
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 95d25b010a25..bdb9563c64a0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2434,7 +2434,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
2434 2434
2435 if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) 2435 if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
2436 skb_set_transport_header(skb, keys.control.thoff); 2436 skb_set_transport_header(skb, keys.control.thoff);
2437 else 2437 else if (offset_hint >= 0)
2438 skb_set_transport_header(skb, offset_hint); 2438 skb_set_transport_header(skb, offset_hint);
2439} 2439}
2440 2440
@@ -4212,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4212 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; 4212 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4213} 4213}
4214 4214
4215static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4216{
4217 return skb_is_gso(skb) &&
4218 skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4219}
4220
4215static inline void skb_gso_reset(struct sk_buff *skb) 4221static inline void skb_gso_reset(struct sk_buff *skb)
4216{ 4222{
4217 skb_shinfo(skb)->gso_size = 0; 4223 skb_shinfo(skb)->gso_size = 0;
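skb_is_gso_tcp() gives callers, the virtio_net header handling below being the immediate user, a one-liner for "this GSO packet is TCP". A hypothetical validation helper as illustration (foo_validate_gso() is not part of this patch):

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hypothetical check for an offload path that can only segment TCP. */
static int foo_validate_gso(const struct sk_buff *skb)
{
	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
		return -EOPNOTSUPP;
	return 0;
}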
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index cb462f9ab7dd..e0348cb0a1dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
57 57
58 if (!skb_partial_csum_set(skb, start, off)) 58 if (!skb_partial_csum_set(skb, start, off))
59 return -EINVAL; 59 return -EINVAL;
60 } else {
61 /* gso packets without NEEDS_CSUM do not set transport_offset.
62 * probe and drop if it does not match one of the above types.
63 */
64 if (gso_type && skb->network_header) {
65 if (!skb->protocol)
66 virtio_net_hdr_set_proto(skb, hdr);
67retry:
68 skb_probe_transport_header(skb, -1);
69 if (!skb_transport_header_was_set(skb)) {
70 /* UFO does not specify ipv4 or 6: try both */
71 if (gso_type & SKB_GSO_UDP &&
72 skb->protocol == htons(ETH_P_IP)) {
73 skb->protocol = htons(ETH_P_IPV6);
74 goto retry;
75 }
76 return -EINVAL;
77 }
78 }
60 } 79 }
61 80
62 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 81 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 00b5e7825508..74ff688568a0 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -39,6 +39,7 @@ struct inet_peer {
39 39
40 u32 metrics[RTAX_MAX]; 40 u32 metrics[RTAX_MAX];
41 u32 rate_tokens; /* rate limiting for ICMP */ 41 u32 rate_tokens; /* rate limiting for ICMP */
42 u32 n_redirects;
42 unsigned long rate_last; 43 unsigned long rate_last;
43 /* 44 /*
44 * Once inet_peer is queued for deletion (refcnt == 0), following field 45 * Once inet_peer is queued for deletion (refcnt == 0), following field
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index b669fe6dbc3b..98f31c7ea23d 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -63,10 +63,11 @@ struct pnpipehdr {
63 u8 state_after_reset; /* reset request */ 63 u8 state_after_reset; /* reset request */
64 u8 error_code; /* any response */ 64 u8 error_code; /* any response */
65 u8 pep_type; /* status indication */ 65 u8 pep_type; /* status indication */
66 u8 data[1]; 66 u8 data0; /* anything else */
67 }; 67 };
68 u8 data[];
68}; 69};
69#define other_pep_type data[1] 70#define other_pep_type data[0]
70 71
71static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) 72static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
72{ 73{
diff --git a/include/net/sock.h b/include/net/sock.h
index 2b229f7be8eb..f43f935cb113 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1277,7 +1277,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
1277 percpu_counter_inc(sk->sk_prot->sockets_allocated); 1277 percpu_counter_inc(sk->sk_prot->sockets_allocated);
1278} 1278}
1279 1279
1280static inline int 1280static inline u64
1281sk_sockets_allocated_read_positive(struct sock *sk) 1281sk_sockets_allocated_read_positive(struct sock *sk)
1282{ 1282{
1283 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); 1283 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 7298a53b9702..85386becbaea 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -853,7 +853,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
853 xfrm_pol_put(pols[i]); 853 xfrm_pol_put(pols[i]);
854} 854}
855 855
856void __xfrm_state_destroy(struct xfrm_state *); 856void __xfrm_state_destroy(struct xfrm_state *, bool);
857 857
858static inline void __xfrm_state_put(struct xfrm_state *x) 858static inline void __xfrm_state_put(struct xfrm_state *x)
859{ 859{
@@ -863,7 +863,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
863static inline void xfrm_state_put(struct xfrm_state *x) 863static inline void xfrm_state_put(struct xfrm_state *x)
864{ 864{
865 if (refcount_dec_and_test(&x->refcnt)) 865 if (refcount_dec_and_test(&x->refcnt))
866 __xfrm_state_destroy(x); 866 __xfrm_state_destroy(x, false);
867}
868
869static inline void xfrm_state_put_sync(struct xfrm_state *x)
870{
871 if (refcount_dec_and_test(&x->refcnt))
872 __xfrm_state_destroy(x, true);
867} 873}
868 874
869static inline void xfrm_state_hold(struct xfrm_state *x) 875static inline void xfrm_state_hold(struct xfrm_state *x)
@@ -1590,7 +1596,7 @@ struct xfrmk_spdinfo {
1590 1596
1591struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); 1597struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1592int xfrm_state_delete(struct xfrm_state *x); 1598int xfrm_state_delete(struct xfrm_state *x);
1593int xfrm_state_flush(struct net *net, u8 proto, bool task_valid); 1599int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1594int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); 1600int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1595void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); 1601void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1596void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); 1602void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 14565d703291..e8baca85bac6 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -137,15 +137,21 @@ enum {
137 INET_DIAG_TCLASS, 137 INET_DIAG_TCLASS,
138 INET_DIAG_SKMEMINFO, 138 INET_DIAG_SKMEMINFO,
139 INET_DIAG_SHUTDOWN, 139 INET_DIAG_SHUTDOWN,
140 INET_DIAG_DCTCPINFO, 140
141 INET_DIAG_PROTOCOL, /* response attribute only */ 141 /*
142 * Next extensions cannot be requested in struct inet_diag_req_v2:
143 * its field idiag_ext has only 8 bits.
144 */
145
146 INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
147 INET_DIAG_PROTOCOL, /* response attribute only */
142 INET_DIAG_SKV6ONLY, 148 INET_DIAG_SKV6ONLY,
143 INET_DIAG_LOCALS, 149 INET_DIAG_LOCALS,
144 INET_DIAG_PEERS, 150 INET_DIAG_PEERS,
145 INET_DIAG_PAD, 151 INET_DIAG_PAD,
146 INET_DIAG_MARK, 152 INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
147 INET_DIAG_BBRINFO, 153 INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
148 INET_DIAG_CLASS_ID, 154 INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
149 INET_DIAG_MD5SIG, 155 INET_DIAG_MD5SIG,
150 __INET_DIAG_MAX, 156 __INET_DIAG_MAX,
151}; 157};
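The reshuffled comments encode a userspace-visible rule: idiag_ext is a single byte, so only the first eight extensions can be named in the request, and the later ones ride on existing request bits (DCTCP/BBR data under INET_DIAG_VEGASINFO, class ID under INET_DIAG_TCLASS). A small userspace-style sketch of building the request mask (foo_fill_ext() is illustrative):

#include <linux/inet_diag.h>

static void foo_fill_ext(struct inet_diag_req_v2 *req)
{
	/* Extensions are requested as (1 << (attribute - 1)). */
	req->idiag_ext |= 1 << (INET_DIAG_INFO - 1);
	/* DCTCP/BBR info cannot be named here; ask via VEGASINFO instead. */
	req->idiag_ext |= 1 << (INET_DIAG_VEGASINFO - 1);
}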
diff --git a/init/initramfs.c b/init/initramfs.c
index 7cea802d00ef..fca899622937 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -550,6 +550,7 @@ skip:
550 initrd_end = 0; 550 initrd_end = 0;
551} 551}
552 552
553#ifdef CONFIG_BLK_DEV_RAM
553#define BUF_SIZE 1024 554#define BUF_SIZE 1024
554static void __init clean_rootfs(void) 555static void __init clean_rootfs(void)
555{ 556{
@@ -596,6 +597,7 @@ static void __init clean_rootfs(void)
596 ksys_close(fd); 597 ksys_close(fd);
597 kfree(buf); 598 kfree(buf);
598} 599}
600#endif
599 601
600static int __init populate_rootfs(void) 602static int __init populate_rootfs(void)
601{ 603{
@@ -638,10 +640,8 @@ static int __init populate_rootfs(void)
638 printk(KERN_INFO "Unpacking initramfs...\n"); 640 printk(KERN_INFO "Unpacking initramfs...\n");
639 err = unpack_to_rootfs((char *)initrd_start, 641 err = unpack_to_rootfs((char *)initrd_start,
640 initrd_end - initrd_start); 642 initrd_end - initrd_start);
641 if (err) { 643 if (err)
642 printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); 644 printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
643 clean_rootfs();
644 }
645 free_initrd(); 645 free_initrd();
646#endif 646#endif
647 } 647 }
diff --git a/init/main.c b/init/main.c
index e2e80ca3165a..c86a1c8f19f4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void)
695 initrd_start = 0; 695 initrd_start = 0;
696 } 696 }
697#endif 697#endif
698 page_ext_init();
699 kmemleak_init(); 698 kmemleak_init();
700 setup_per_cpu_pageset(); 699 setup_per_cpu_pageset();
701 numa_policy_init(); 700 numa_policy_init();
@@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void)
1131 sched_init_smp(); 1130 sched_init_smp();
1132 1131
1133 page_alloc_init_late(); 1132 page_alloc_init_late();
1133 /* Initialize page ext after all struct pages are initialized. */
1134 page_ext_init();
1134 1135
1135 do_basic_setup(); 1136 do_basic_setup();
1136 1137
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index abf1002080df..93a5cbbde421 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
471 } 471 }
472 472
473 if (!node || node->prefixlen != key->prefixlen || 473 if (!node || node->prefixlen != key->prefixlen ||
474 node->prefixlen != matchlen ||
474 (node->flags & LPM_TREE_NODE_FLAG_IM)) { 475 (node->flags & LPM_TREE_NODE_FLAG_IM)) {
475 ret = -ENOENT; 476 ret = -ENOENT;
476 goto out; 477 goto out;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index d43b14535827..950ab2f28922 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
44 struct stack_map_irq_work *work; 44 struct stack_map_irq_work *work;
45 45
46 work = container_of(entry, struct stack_map_irq_work, irq_work); 46 work = container_of(entry, struct stack_map_irq_work, irq_work);
47 up_read(work->sem); 47 up_read_non_owner(work->sem);
48 work->sem = NULL; 48 work->sem = NULL;
49} 49}
50 50
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
338 } else { 338 } else {
339 work->sem = &current->mm->mmap_sem; 339 work->sem = &current->mm->mmap_sem;
340 irq_work_queue(&work->irq_work); 340 irq_work_queue(&work->irq_work);
341 /*
342 * The irq_work will release the mmap_sem with
343 * up_read_non_owner(). The rwsem_release() is called
344 * here to release the lock from lockdep's perspective.
345 */
346 rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
341 } 347 }
342} 348}
343 349
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 56674a7c3778..8f295b790297 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
1617 return 0; 1617 return 0;
1618} 1618}
1619 1619
1620static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, 1620static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
1621 int size, enum bpf_access_type t) 1621 u32 regno, int off, int size,
1622 enum bpf_access_type t)
1622{ 1623{
1623 struct bpf_reg_state *regs = cur_regs(env); 1624 struct bpf_reg_state *regs = cur_regs(env);
1624 struct bpf_reg_state *reg = &regs[regno]; 1625 struct bpf_reg_state *reg = &regs[regno];
1625 struct bpf_insn_access_aux info; 1626 struct bpf_insn_access_aux info = {};
1626 1627
1627 if (reg->smin_value < 0) { 1628 if (reg->smin_value < 0) {
1628 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 1629 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
@@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
1636 return -EACCES; 1637 return -EACCES;
1637 } 1638 }
1638 1639
1640 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
1641
1639 return 0; 1642 return 0;
1640} 1643}
1641 1644
@@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
2032 verbose(env, "cannot write into socket\n"); 2035 verbose(env, "cannot write into socket\n");
2033 return -EACCES; 2036 return -EACCES;
2034 } 2037 }
2035 err = check_sock_access(env, regno, off, size, t); 2038 err = check_sock_access(env, insn_idx, regno, off, size, t);
2036 if (!err && value_regno >= 0) 2039 if (!err && value_regno >= 0)
2037 mark_reg_unknown(env, regs, value_regno); 2040 mark_reg_unknown(env, regs, value_regno);
2038 } else { 2041 } else {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e5ede6918050..26d6edab051a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
4963 } 4963 }
4964} 4964}
4965 4965
4966static int perf_event_check_period(struct perf_event *event, u64 value)
4967{
4968 return event->pmu->check_period(event, value);
4969}
4970
4966static int perf_event_period(struct perf_event *event, u64 __user *arg) 4971static int perf_event_period(struct perf_event *event, u64 __user *arg)
4967{ 4972{
4968 u64 value; 4973 u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
4979 if (event->attr.freq && value > sysctl_perf_event_sample_rate) 4984 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4980 return -EINVAL; 4985 return -EINVAL;
4981 4986
4987 if (perf_event_check_period(event, value))
4988 return -EINVAL;
4989
4982 event_function_call(event, __perf_event_period, &value); 4990 event_function_call(event, __perf_event_period, &value);
4983 4991
4984 return 0; 4992 return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
9391 return 0; 9399 return 0;
9392} 9400}
9393 9401
9402static int perf_event_nop_int(struct perf_event *event, u64 value)
9403{
9404 return 0;
9405}
9406
9394static DEFINE_PER_CPU(unsigned int, nop_txn_flags); 9407static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
9395 9408
9396static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) 9409static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
9691 pmu->pmu_disable = perf_pmu_nop_void; 9704 pmu->pmu_disable = perf_pmu_nop_void;
9692 } 9705 }
9693 9706
9707 if (!pmu->check_period)
9708 pmu->check_period = perf_event_nop_int;
9709
9694 if (!pmu->event_idx) 9710 if (!pmu->event_idx)
9695 pmu->event_idx = perf_event_idx_default; 9711 pmu->event_idx = perf_event_idx_default;
9696 9712
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 309ef5a64af5..5ab4fe3b1dcc 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
734 size = sizeof(struct ring_buffer); 734 size = sizeof(struct ring_buffer);
735 size += nr_pages * sizeof(void *); 735 size += nr_pages * sizeof(void *);
736 736
737 if (order_base_2(size) >= MAX_ORDER) 737 if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
738 goto fail; 738 goto fail;
739 739
740 rb = kzalloc(size, GFP_KERNEL); 740 rb = kzalloc(size, GFP_KERNEL);
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index c3484785b179..0e97ca9306ef 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -322,7 +322,7 @@ static bool update_stats(struct psi_group *group)
322 expires = group->next_update; 322 expires = group->next_update;
323 if (now < expires) 323 if (now < expires)
324 goto out; 324 goto out;
325 if (now - expires > psi_period) 325 if (now - expires >= psi_period)
326 missed_periods = div_u64(now - expires, psi_period); 326 missed_periods = div_u64(now - expires, psi_period);
327 327
328 /* 328 /*
diff --git a/kernel/signal.c b/kernel/signal.c
index 99fa8ff06fd9..57b7771e20d7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2436,9 +2436,12 @@ relock:
2436 } 2436 }
2437 2437
2438 /* Has this task already been marked for death? */ 2438 /* Has this task already been marked for death? */
2439 ksig->info.si_signo = signr = SIGKILL; 2439 if (signal_group_exit(signal)) {
2440 if (signal_group_exit(signal)) 2440 ksig->info.si_signo = signr = SIGKILL;
2441 sigdelset(&current->pending.signal, SIGKILL);
2442 recalc_sigpending();
2441 goto fatal; 2443 goto fatal;
2444 }
2442 2445
2443 for (;;) { 2446 for (;;) {
2444 struct k_sigaction *ka; 2447 struct k_sigaction *ka;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c521b7347482..c4238b441624 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
3384 const char tgid_space[] = " "; 3384 const char tgid_space[] = " ";
3385 const char space[] = " "; 3385 const char space[] = " ";
3386 3386
3387 print_event_info(buf, m);
3388
3387 seq_printf(m, "# %s _-----=> irqs-off\n", 3389 seq_printf(m, "# %s _-----=> irqs-off\n",
3388 tgid ? tgid_space : space); 3390 tgid ? tgid_space : space);
3389 seq_printf(m, "# %s / _----=> need-resched\n", 3391 seq_printf(m, "# %s / _----=> need-resched\n",
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d5fb09ebba8b..9eaf07f99212 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
861static nokprobe_inline int 861static nokprobe_inline int
862fetch_store_strlen(unsigned long addr) 862fetch_store_strlen(unsigned long addr)
863{ 863{
864 mm_segment_t old_fs;
865 int ret, len = 0; 864 int ret, len = 0;
866 u8 c; 865 u8 c;
867 866
868 old_fs = get_fs();
869 set_fs(KERNEL_DS);
870 pagefault_disable();
871
872 do { 867 do {
873 ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); 868 ret = probe_mem_read(&c, (u8 *)addr + len, 1);
874 len++; 869 len++;
875 } while (c && ret == 0 && len < MAX_STRING_SIZE); 870 } while (c && ret == 0 && len < MAX_STRING_SIZE);
876 871
877 pagefault_enable();
878 set_fs(old_fs);
879
880 return (ret < 0) ? ret : len; 872 return (ret < 0) ? ret : len;
881} 873}
882 874
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
index 5c56afc17cf8..4737bb8c07a3 100644
--- a/kernel/trace/trace_probe_tmpl.h
+++ b/kernel/trace/trace_probe_tmpl.h
@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
180 if (unlikely(arg->dynamic)) 180 if (unlikely(arg->dynamic))
181 *dl = make_data_loc(maxlen, dyndata - base); 181 *dl = make_data_loc(maxlen, dyndata - base);
182 ret = process_fetch_insn(arg->code, regs, dl, base); 182 ret = process_fetch_insn(arg->code, regs, dl, base);
183 if (unlikely(ret < 0 && arg->dynamic)) 183 if (unlikely(ret < 0 && arg->dynamic)) {
184 *dl = make_data_loc(0, dyndata - base); 184 *dl = make_data_loc(0, dyndata - base);
185 else 185 } else {
186 dyndata += ret; 186 dyndata += ret;
187 maxlen -= ret;
188 }
187 } 189 }
188} 190}
189 191
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c6659cb37033..59875eb278ea 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -768,9 +768,11 @@ all_leaves_cluster_together:
768 new_s0->index_key[i] = 768 new_s0->index_key[i] =
769 ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); 769 ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
770 770
771 blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); 771 if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
772 pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); 772 blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
773 new_s0->index_key[keylen - 1] &= ~blank; 773 pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
774 new_s0->index_key[keylen - 1] &= ~blank;
775 }
774 776
775 /* This now reduces to a node splitting exercise for which we'll need 777 /* This now reduces to a node splitting exercise for which we'll need
776 * to regenerate the disparity table. 778 * to regenerate the disparity table.
diff --git a/lib/crc32.c b/lib/crc32.c
index 45b1d67a1767..4a20455d1f61 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
206EXPORT_SYMBOL(crc32_le); 206EXPORT_SYMBOL(crc32_le);
207EXPORT_SYMBOL(__crc32c_le); 207EXPORT_SYMBOL(__crc32c_le);
208 208
209u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); 209u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
210u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); 210u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
211 211
212/* 212/*
213 * This multiplies the polynomials x and y modulo the given modulus. 213 * This multiplies the polynomials x and y modulo the given modulus.
diff --git a/mm/debug.c b/mm/debug.c
index 0abb987dad9b..1611cf00a137 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = {
44 44
45void __dump_page(struct page *page, const char *reason) 45void __dump_page(struct page *page, const char *reason)
46{ 46{
47 struct address_space *mapping = page_mapping(page); 47 struct address_space *mapping;
48 bool page_poisoned = PagePoisoned(page); 48 bool page_poisoned = PagePoisoned(page);
49 int mapcount; 49 int mapcount;
50 50
@@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason)
58 goto hex_only; 58 goto hex_only;
59 } 59 }
60 60
61 mapping = page_mapping(page);
62
61 /* 63 /*
62 * Avoid VM_BUG_ON() in page_mapcount(). 64 * Avoid VM_BUG_ON() in page_mapcount().
63 * page->_mapcount space in struct page is used by sl[aou]b pages to 65 * page->_mapcount space in struct page is used by sl[aou]b pages to
diff --git a/mm/gup.c b/mm/gup.c
index 05acd7e2eb22..75029649baca 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1674,7 +1674,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
1674 if (!pmd_present(pmd)) 1674 if (!pmd_present(pmd))
1675 return 0; 1675 return 0;
1676 1676
1677 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { 1677 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
1678 pmd_devmap(pmd))) {
1678 /* 1679 /*
1679 * NUMA hinting faults need to be handled in the GUP 1680 * NUMA hinting faults need to be handled in the GUP
1680 * slowpath for accounting purposes and so that they 1681 * slowpath for accounting purposes and so that they
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index e2bb06c1b45e..5d1065efbd47 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -7,6 +7,8 @@ KCOV_INSTRUMENT := n
7 7
8CFLAGS_REMOVE_common.o = -pg 8CFLAGS_REMOVE_common.o = -pg
9CFLAGS_REMOVE_generic.o = -pg 9CFLAGS_REMOVE_generic.o = -pg
10CFLAGS_REMOVE_tags.o = -pg
11
10# Function splitter causes unnecessary splits in __asan_load1/__asan_store1 12# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
11# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 13# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
12 14
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 73c9cbfdedf4..09b534fbba17 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -361,10 +361,15 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
361 * get different tags. 361 * get different tags.
362 */ 362 */
363static u8 assign_tag(struct kmem_cache *cache, const void *object, 363static u8 assign_tag(struct kmem_cache *cache, const void *object,
364 bool init, bool krealloc) 364 bool init, bool keep_tag)
365{ 365{
366 /* Reuse the same tag for krealloc'ed objects. */ 366 /*
367 if (krealloc) 367 * 1. When an object is kmalloc()'ed, two hooks are called:
368 * kasan_slab_alloc() and kasan_kmalloc(). We assign the
369 * tag only in the first one.
370 * 2. We reuse the same tag for krealloc'ed objects.
371 */
372 if (keep_tag)
368 return get_tag(object); 373 return get_tag(object);
369 374
370 /* 375 /*
@@ -405,12 +410,6 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
405 return (void *)object; 410 return (void *)object;
406} 411}
407 412
408void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
409 gfp_t flags)
410{
411 return kasan_kmalloc(cache, object, cache->object_size, flags);
412}
413
414static inline bool shadow_invalid(u8 tag, s8 shadow_byte) 413static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
415{ 414{
416 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 415 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
@@ -467,7 +466,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
467} 466}
468 467
469static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, 468static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
470 size_t size, gfp_t flags, bool krealloc) 469 size_t size, gfp_t flags, bool keep_tag)
471{ 470{
472 unsigned long redzone_start; 471 unsigned long redzone_start;
473 unsigned long redzone_end; 472 unsigned long redzone_end;
@@ -485,7 +484,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
485 KASAN_SHADOW_SCALE_SIZE); 484 KASAN_SHADOW_SCALE_SIZE);
486 485
487 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 486 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
488 tag = assign_tag(cache, object, false, krealloc); 487 tag = assign_tag(cache, object, false, keep_tag);
489 488
490 /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ 489 /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
491 kasan_unpoison_shadow(set_tag(object, tag), size); 490 kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -498,10 +497,16 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
498 return set_tag(object, tag); 497 return set_tag(object, tag);
499} 498}
500 499
500void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
501 gfp_t flags)
502{
503 return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
504}
505
501void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, 506void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
502 size_t size, gfp_t flags) 507 size_t size, gfp_t flags)
503{ 508{
504 return __kasan_kmalloc(cache, object, size, flags, false); 509 return __kasan_kmalloc(cache, object, size, flags, true);
505} 510}
506EXPORT_SYMBOL(kasan_kmalloc); 511EXPORT_SYMBOL(kasan_kmalloc);
507 512
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 0777649e07c4..63fca3172659 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -46,7 +46,7 @@ void kasan_init_tags(void)
46 int cpu; 46 int cpu;
47 47
48 for_each_possible_cpu(cpu) 48 for_each_possible_cpu(cpu)
49 per_cpu(prng_state, cpu) = get_random_u32(); 49 per_cpu(prng_state, cpu) = (u32)get_cycles();
50} 50}
51 51
52/* 52/*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f9d9dc250428..707fa5579f66 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
574 unsigned long flags; 574 unsigned long flags;
575 struct kmemleak_object *object, *parent; 575 struct kmemleak_object *object, *parent;
576 struct rb_node **link, *rb_parent; 576 struct rb_node **link, *rb_parent;
577 unsigned long untagged_ptr;
577 578
578 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); 579 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
579 if (!object) { 580 if (!object) {
@@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
619 620
620 write_lock_irqsave(&kmemleak_lock, flags); 621 write_lock_irqsave(&kmemleak_lock, flags);
621 622
622 min_addr = min(min_addr, ptr); 623 untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
623 max_addr = max(max_addr, ptr + size); 624 min_addr = min(min_addr, untagged_ptr);
625 max_addr = max(max_addr, untagged_ptr + size);
624 link = &object_tree_root.rb_node; 626 link = &object_tree_root.rb_node;
625 rb_parent = NULL; 627 rb_parent = NULL;
626 while (*link) { 628 while (*link) {
@@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end,
1333 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); 1335 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1334 unsigned long *end = _end - (BYTES_PER_POINTER - 1); 1336 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1335 unsigned long flags; 1337 unsigned long flags;
1338 unsigned long untagged_ptr;
1336 1339
1337 read_lock_irqsave(&kmemleak_lock, flags); 1340 read_lock_irqsave(&kmemleak_lock, flags);
1338 for (ptr = start; ptr < end; ptr++) { 1341 for (ptr = start; ptr < end; ptr++) {
@@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end,
1347 pointer = *ptr; 1350 pointer = *ptr;
1348 kasan_enable_current(); 1351 kasan_enable_current();
1349 1352
1350 if (pointer < min_addr || pointer >= max_addr) 1353 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1354 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1351 continue; 1355 continue;
1352 1356
1353 /* 1357 /*
diff --git a/mm/memblock.c b/mm/memblock.c
index 022d4cbb3618..ea31045ba704 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -26,6 +26,13 @@
26 26
27#include "internal.h" 27#include "internal.h"
28 28
29#define INIT_MEMBLOCK_REGIONS 128
30#define INIT_PHYSMEM_REGIONS 4
31
32#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
33# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
34#endif
35
29/** 36/**
30 * DOC: memblock overview 37 * DOC: memblock overview
31 * 38 *
@@ -92,7 +99,7 @@ unsigned long max_pfn;
92unsigned long long max_possible_pfn; 99unsigned long long max_possible_pfn;
93 100
94static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 101static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
95static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 102static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
96#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 103#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
97static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; 104static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
98#endif 105#endif
@@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = {
105 112
106 .reserved.regions = memblock_reserved_init_regions, 113 .reserved.regions = memblock_reserved_init_regions,
107 .reserved.cnt = 1, /* empty dummy entry */ 114 .reserved.cnt = 1, /* empty dummy entry */
108 .reserved.max = INIT_MEMBLOCK_REGIONS, 115 .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
109 .reserved.name = "reserved", 116 .reserved.name = "reserved",
110 117
111#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 118#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
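The reserved-region array size is now a separate knob: a build that expects many early reservations may predefine INIT_MEMBLOCK_RESERVED_REGIONS before mm/memblock.c is compiled, otherwise it falls back to INIT_MEMBLOCK_REGIONS. A purely illustrative override (the value is invented, not taken from this patch):

/* Hypothetical arch header or -D flag: allow more early reserved regions. */
#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
#define INIT_MEMBLOCK_RESERVED_REGIONS	512
#endif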
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 124e794867c5..1ad28323fb9f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page)
1188 return PageBuddy(page) && page_order(page) >= pageblock_order; 1188 return PageBuddy(page) && page_order(page) >= pageblock_order;
1189} 1189}
1190 1190
1191/* Return the start of the next active pageblock after a given page */ 1191/* Return the pfn of the start of the next active pageblock after a given pfn */
1192static struct page *next_active_pageblock(struct page *page) 1192static unsigned long next_active_pageblock(unsigned long pfn)
1193{ 1193{
1194 struct page *page = pfn_to_page(pfn);
1195
1194 /* Ensure the starting page is pageblock-aligned */ 1196 /* Ensure the starting page is pageblock-aligned */
1195 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); 1197 BUG_ON(pfn & (pageblock_nr_pages - 1));
1196 1198
1197 /* If the entire pageblock is free, move to the end of free page */ 1199 /* If the entire pageblock is free, move to the end of free page */
1198 if (pageblock_free(page)) { 1200 if (pageblock_free(page)) {
@@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page)
1200 /* be careful. we don't have locks, page_order can be changed.*/ 1202 /* be careful. we don't have locks, page_order can be changed.*/
1201 order = page_order(page); 1203 order = page_order(page);
1202 if ((order < MAX_ORDER) && (order >= pageblock_order)) 1204 if ((order < MAX_ORDER) && (order >= pageblock_order))
1203 return page + (1 << order); 1205 return pfn + (1 << order);
1204 } 1206 }
1205 1207
1206 return page + pageblock_nr_pages; 1208 return pfn + pageblock_nr_pages;
1207} 1209}
1208 1210
1209static bool is_pageblock_removable_nolock(struct page *page) 1211static bool is_pageblock_removable_nolock(unsigned long pfn)
1210{ 1212{
1213 struct page *page = pfn_to_page(pfn);
1211 struct zone *zone; 1214 struct zone *zone;
1212 unsigned long pfn;
1213 1215
1214 /* 1216 /*
1215 * We have to be careful here because we are iterating over memory 1217 * We have to be careful here because we are iterating over memory
@@ -1232,13 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
1232/* Checks if this range of memory is likely to be hot-removable. */ 1234/* Checks if this range of memory is likely to be hot-removable. */
1233bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) 1235bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1234{ 1236{
1235 struct page *page = pfn_to_page(start_pfn); 1237 unsigned long end_pfn, pfn;
1236 unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page))); 1238
1237 struct page *end_page = pfn_to_page(end_pfn); 1239 end_pfn = min(start_pfn + nr_pages,
1240 zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
1238 1241
1239 /* Check the starting page of each pageblock within the range */ 1242 /* Check the starting page of each pageblock within the range */
1240 for (; page < end_page; page = next_active_pageblock(page)) { 1243 for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
1241 if (!is_pageblock_removable_nolock(page)) 1244 if (!is_pageblock_removable_nolock(pfn))
1242 return false; 1245 return false;
1243 cond_resched(); 1246 cond_resched();
1244 } 1247 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d4496d9d34f5..ee2bce59d2bf 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1314 nodemask_t *nodes) 1314 nodemask_t *nodes)
1315{ 1315{
1316 unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1316 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1317 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 1317 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1318 1318
1319 if (copy > nbytes) { 1319 if (copy > nbytes) {
1320 if (copy > PAGE_SIZE) 1320 if (copy > PAGE_SIZE)
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
1491 int uninitialized_var(pval); 1491 int uninitialized_var(pval);
1492 nodemask_t nodes; 1492 nodemask_t nodes;
1493 1493
1494 if (nmask != NULL && maxnode < MAX_NUMNODES) 1494 if (nmask != NULL && maxnode < nr_node_ids)
1495 return -EINVAL; 1495 return -EINVAL;
1496 1496
1497 err = do_get_mempolicy(&pval, &nodes, addr, flags); 1497 err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1527 unsigned long nr_bits, alloc_size; 1527 unsigned long nr_bits, alloc_size;
1528 DECLARE_BITMAP(bm, MAX_NUMNODES); 1528 DECLARE_BITMAP(bm, MAX_NUMNODES);
1529 1529
1530 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 1530 nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1531 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1531 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1532 1532
1533 if (nmask) 1533 if (nmask)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 35fdde041f5c..0b9f577b1a2a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone)
2170 2170
2171 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2171 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2172 watermark_boost_factor, 10000); 2172 watermark_boost_factor, 10000);
2173
2174 /*
2175 * high watermark may be uninitialised if fragmentation occurs
2176 * very early in boot so do not boost. We do not fall
2177 * through and boost by pageblock_nr_pages as failing
2178 * allocations that early means that reclaim is not going
2179 * to help and it may even be impossible to reclaim the
2180 * boosted watermark resulting in a hang.
2181 */
2182 if (!max_boost)
2183 return;
2184
2173 max_boost = max(pageblock_nr_pages, max_boost); 2185 max_boost = max(pageblock_nr_pages, max_boost);
2174 2186
2175 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2187 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
@@ -4675,11 +4687,11 @@ refill:
4675 /* Even if we own the page, we do not use atomic_set(). 4687 /* Even if we own the page, we do not use atomic_set().
4676 * This would break get_page_unless_zero() users. 4688 * This would break get_page_unless_zero() users.
4677 */ 4689 */
4678 page_ref_add(page, size - 1); 4690 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4679 4691
4680 /* reset page count bias and offset to start of new frag */ 4692 /* reset page count bias and offset to start of new frag */
4681 nc->pfmemalloc = page_is_pfmemalloc(page); 4693 nc->pfmemalloc = page_is_pfmemalloc(page);
4682 nc->pagecnt_bias = size; 4694 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4683 nc->offset = size; 4695 nc->offset = size;
4684 } 4696 }
4685 4697
@@ -4695,10 +4707,10 @@ refill:
4695 size = nc->size; 4707 size = nc->size;
4696#endif 4708#endif
4697 /* OK, page count is 0, we can safely set it */ 4709 /* OK, page count is 0, we can safely set it */
4698 set_page_count(page, size); 4710 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4699 4711
4700 /* reset page count bias and offset to start of new frag */ 4712 /* reset page count bias and offset to start of new frag */
4701 nc->pagecnt_bias = size; 4713 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4702 offset = size - fragsz; 4714 offset = size - fragsz;
4703 } 4715 }
4704 4716
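The page_alloc.c frag-cache hunks above charge the page's refcount with the fixed PAGE_FRAG_CACHE_MAX_SIZE instead of the variable allocation size, which keeps the bias bookkeeping consistent when the high-order allocation falls back to a smaller page. A minimal user-space sketch of that refcount-bias idea follows; frag_cache, refill() and frag_alloc() are invented names and the buffer release path is omitted, so this is an illustration of the scheme, not the kernel code:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096
#define BIAS     (BUF_SIZE + 1)   /* analogous to PAGE_FRAG_CACHE_MAX_SIZE + 1 */

struct frag_cache {
	char *buf;
	size_t offset;          /* bump-down offset into buf */
	unsigned int bias;      /* references this cache still "owns" */
	atomic_uint refcount;   /* shared refcount; freeing a fragment
	                           decrements it (release path omitted) */
};

static void refill(struct frag_cache *fc)
{
	fc->buf = malloc(BUF_SIZE);
	if (!fc->buf)
		abort();
	atomic_store(&fc->refcount, BIAS);   /* charge every fragment up front */
	fc->bias = BIAS;
	fc->offset = BUF_SIZE;
}

static void *frag_alloc(struct frag_cache *fc, size_t fragsz)
{
	if (!fc->buf)
		refill(fc);

	if (fc->offset < fragsz) {
		/* Buffer exhausted: return the references never handed out.
		 * If that leaves zero, all fragments were already freed and
		 * the buffer can be recharged and reused in place. */
		if (atomic_fetch_sub(&fc->refcount, fc->bias) == fc->bias) {
			atomic_store(&fc->refcount, BIAS);
			fc->bias = BIAS;
			fc->offset = BUF_SIZE;
		} else {
			/* Fragments still in flight; the last of them drops
			 * the old buffer.  Start a fresh one. */
			refill(fc);
		}
	}

	fc->offset -= fragsz;
	fc->bias--;            /* hand out one of the pre-charged references */
	return fc->buf + fc->offset;
}

int main(void)
{
	struct frag_cache fc = { 0 };
	void *a = frag_alloc(&fc, 256);
	void *b = frag_alloc(&fc, 1536);
	printf("a=%p b=%p bias=%u refcount=%u\n",
	       a, b, fc.bias, atomic_load(&fc.refcount));
	free(fc.buf);
	return 0;
}

The point of the fixed charge is visible in the reconciliation step: the subtraction of the remaining bias only balances if the initial charge and the per-fragment decrements were computed against the same constant.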
diff --git a/mm/page_ext.c b/mm/page_ext.c
index ae44f7adbe07..8c78b8d45117 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -398,10 +398,8 @@ void __init page_ext_init(void)
398 * We know some arch can have a nodes layout such as 398 * We know some arch can have a nodes layout such as
399 * -------------pfn--------------> 399 * -------------pfn-------------->
400 * N0 | N1 | N2 | N0 | N1 | N2|.... 400 * N0 | N1 | N2 | N0 | N1 | N2|....
401 *
402 * Take into account DEFERRED_STRUCT_PAGE_INIT.
403 */ 401 */
404 if (early_pfn_to_nid(pfn) != nid) 402 if (pfn_to_nid(pfn) != nid)
405 continue; 403 continue;
406 if (init_section_page_ext(pfn, nid)) 404 if (init_section_page_ext(pfn, nid))
407 goto oom; 405 goto oom;
diff --git a/mm/shmem.c b/mm/shmem.c
index 6ece1e2fe76e..0905215fb016 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2854,10 +2854,14 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
2854 * No ordinary (disk based) filesystem counts links as inodes; 2854 * No ordinary (disk based) filesystem counts links as inodes;
2855 * but each new link needs a new dentry, pinning lowmem, and 2855 * but each new link needs a new dentry, pinning lowmem, and
2856 * tmpfs dentries cannot be pruned until they are unlinked. 2856 * tmpfs dentries cannot be pruned until they are unlinked.
2857 * But if an O_TMPFILE file is linked into the tmpfs, the
2858 * first link must skip that, to get the accounting right.
2857 */ 2859 */
2858 ret = shmem_reserve_inode(inode->i_sb); 2860 if (inode->i_nlink) {
2859 if (ret) 2861 ret = shmem_reserve_inode(inode->i_sb);
2860 goto out; 2862 if (ret)
2863 goto out;
2864 }
2861 2865
2862 dir->i_size += BOGO_DIRENT_SIZE; 2866 dir->i_size += BOGO_DIRENT_SIZE;
2863 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 2867 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
diff --git a/mm/slab.c b/mm/slab.c
index 78eb8c5bf4e4..91c1863df93d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2359,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
2359 void *freelist; 2359 void *freelist;
2360 void *addr = page_address(page); 2360 void *addr = page_address(page);
2361 2361
2362 page->s_mem = kasan_reset_tag(addr) + colour_off; 2362 page->s_mem = addr + colour_off;
2363 page->active = 0; 2363 page->active = 0;
2364 2364
2365 if (OBJFREELIST_SLAB(cachep)) 2365 if (OBJFREELIST_SLAB(cachep))
@@ -2368,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
2368 /* Slab management obj is off-slab. */ 2368 /* Slab management obj is off-slab. */
2369 freelist = kmem_cache_alloc_node(cachep->freelist_cache, 2369 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2370 local_flags, nodeid); 2370 local_flags, nodeid);
2371 freelist = kasan_reset_tag(freelist);
2371 if (!freelist) 2372 if (!freelist)
2372 return NULL; 2373 return NULL;
2373 } else { 2374 } else {
@@ -2681,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
2681 2682
2682 offset *= cachep->colour_off; 2683 offset *= cachep->colour_off;
2683 2684
2685 /*
2686 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
2687 * page_address() in the latter returns a non-tagged pointer,
2688 * as it should be for slab pages.
2689 */
2690 kasan_poison_slab(page);
2691
2684 /* Get slab management. */ 2692 /* Get slab management. */
2685 freelist = alloc_slabmgmt(cachep, page, offset, 2693 freelist = alloc_slabmgmt(cachep, page, offset,
2686 local_flags & ~GFP_CONSTRAINT_MASK, page_node); 2694 local_flags & ~GFP_CONSTRAINT_MASK, page_node);
@@ -2689,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
2689 2697
2690 slab_map_pages(cachep, page, freelist); 2698 slab_map_pages(cachep, page, freelist);
2691 2699
2692 kasan_poison_slab(page);
2693 cache_init_objs(cachep, page); 2700 cache_init_objs(cachep, page);
2694 2701
2695 if (gfpflags_allow_blocking(local_flags)) 2702 if (gfpflags_allow_blocking(local_flags))
@@ -3540,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3540{ 3547{
3541 void *ret = slab_alloc(cachep, flags, _RET_IP_); 3548 void *ret = slab_alloc(cachep, flags, _RET_IP_);
3542 3549
3543 ret = kasan_slab_alloc(cachep, ret, flags);
3544 trace_kmem_cache_alloc(_RET_IP_, ret, 3550 trace_kmem_cache_alloc(_RET_IP_, ret,
3545 cachep->object_size, cachep->size, flags); 3551 cachep->object_size, cachep->size, flags);
3546 3552
@@ -3630,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3630{ 3636{
3631 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3637 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3632 3638
3633 ret = kasan_slab_alloc(cachep, ret, flags);
3634 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3639 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3635 cachep->object_size, cachep->size, 3640 cachep->object_size, cachep->size,
3636 flags, nodeid); 3641 flags, nodeid);
@@ -4408,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4408 unsigned int objnr; 4413 unsigned int objnr;
4409 unsigned long offset; 4414 unsigned long offset;
4410 4415
4416 ptr = kasan_reset_tag(ptr);
4417
4411 /* Find and validate object. */ 4418 /* Find and validate object. */
4412 cachep = page->slab_cache; 4419 cachep = page->slab_cache;
4413 objnr = obj_to_index(cachep, page, (void *)ptr); 4420 objnr = obj_to_index(cachep, page, (void *)ptr);
diff --git a/mm/slab.h b/mm/slab.h
index 4190c24ef0e9..384105318779 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -437,11 +437,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
437 437
438 flags &= gfp_allowed_mask; 438 flags &= gfp_allowed_mask;
439 for (i = 0; i < size; i++) { 439 for (i = 0; i < size; i++) {
440 void *object = p[i]; 440 p[i] = kasan_slab_alloc(s, p[i], flags);
441 441 /* As p[i] might get tagged, call kmemleak hook after KASAN. */
442 kmemleak_alloc_recursive(object, s->object_size, 1, 442 kmemleak_alloc_recursive(p[i], s->object_size, 1,
443 s->flags, flags); 443 s->flags, flags);
444 p[i] = kasan_slab_alloc(s, object, flags);
445 } 444 }
446 445
447 if (memcg_kmem_enabled()) 446 if (memcg_kmem_enabled())
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 81732d05e74a..f9d89c1b5977 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1228,8 +1228,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
1228 flags |= __GFP_COMP; 1228 flags |= __GFP_COMP;
1229 page = alloc_pages(flags, order); 1229 page = alloc_pages(flags, order);
1230 ret = page ? page_address(page) : NULL; 1230 ret = page ? page_address(page) : NULL;
1231 kmemleak_alloc(ret, size, 1, flags);
1232 ret = kasan_kmalloc_large(ret, size, flags); 1231 ret = kasan_kmalloc_large(ret, size, flags);
1232 /* As ret might get tagged, call kmemleak hook after KASAN. */
1233 kmemleak_alloc(ret, size, 1, flags);
1233 return ret; 1234 return ret;
1234} 1235}
1235EXPORT_SYMBOL(kmalloc_order); 1236EXPORT_SYMBOL(kmalloc_order);
diff --git a/mm/slub.c b/mm/slub.c
index 1e3d0ec4e200..dc777761b6b7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
249 unsigned long ptr_addr) 249 unsigned long ptr_addr)
250{ 250{
251#ifdef CONFIG_SLAB_FREELIST_HARDENED 251#ifdef CONFIG_SLAB_FREELIST_HARDENED
252 return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr); 252 /*
253 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
254 * Normally, this doesn't cause any issues, as both set_freepointer()
255 * and get_freepointer() are called with a pointer with the same tag.
256 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
257 * example, when __free_slub() iterates over objects in a cache, it
258 * passes untagged pointers to check_object(). check_object() in turns
259 * calls get_freepointer() with an untagged pointer, which causes the
260 * freepointer to be restored incorrectly.
261 */
262 return (void *)((unsigned long)ptr ^ s->random ^
263 (unsigned long)kasan_reset_tag((void *)ptr_addr));
253#else 264#else
254 return ptr; 265 return ptr;
255#endif 266#endif
@@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
303 __p < (__addr) + (__objects) * (__s)->size; \ 314 __p < (__addr) + (__objects) * (__s)->size; \
304 __p += (__s)->size) 315 __p += (__s)->size)
305 316
306#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
307 for (__p = fixup_red_left(__s, __addr), __idx = 1; \
308 __idx <= __objects; \
309 __p += (__s)->size, __idx++)
310
311/* Determine object index from a given position */ 317/* Determine object index from a given position */
312static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) 318static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
313{ 319{
314 return (p - addr) / s->size; 320 return (kasan_reset_tag(p) - addr) / s->size;
315} 321}
316 322
317static inline unsigned int order_objects(unsigned int order, unsigned int size) 323static inline unsigned int order_objects(unsigned int order, unsigned int size)
@@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
507 return 1; 513 return 1;
508 514
509 base = page_address(page); 515 base = page_address(page);
516 object = kasan_reset_tag(object);
510 object = restore_red_left(s, object); 517 object = restore_red_left(s, object);
511 if (object < base || object >= base + page->objects * s->size || 518 if (object < base || object >= base + page->objects * s->size ||
512 (object - base) % s->size) { 519 (object - base) % s->size) {
@@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
1075 init_tracking(s, object); 1082 init_tracking(s, object);
1076} 1083}
1077 1084
1085static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
1086{
1087 if (!(s->flags & SLAB_POISON))
1088 return;
1089
1090 metadata_access_enable();
1091 memset(addr, POISON_INUSE, PAGE_SIZE << order);
1092 metadata_access_disable();
1093}
1094
1078static inline int alloc_consistency_checks(struct kmem_cache *s, 1095static inline int alloc_consistency_checks(struct kmem_cache *s,
1079 struct page *page, 1096 struct page *page,
1080 void *object, unsigned long addr) 1097 void *object, unsigned long addr)
@@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
1330#else /* !CONFIG_SLUB_DEBUG */ 1347#else /* !CONFIG_SLUB_DEBUG */
1331static inline void setup_object_debug(struct kmem_cache *s, 1348static inline void setup_object_debug(struct kmem_cache *s,
1332 struct page *page, void *object) {} 1349 struct page *page, void *object) {}
1350static inline void setup_page_debug(struct kmem_cache *s,
1351 void *addr, int order) {}
1333 1352
1334static inline int alloc_debug_processing(struct kmem_cache *s, 1353static inline int alloc_debug_processing(struct kmem_cache *s,
1335 struct page *page, void *object, unsigned long addr) { return 0; } 1354 struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1374,8 +1393,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
1374 */ 1393 */
1375static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) 1394static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1376{ 1395{
1396 ptr = kasan_kmalloc_large(ptr, size, flags);
1397 /* As ptr might get tagged, call kmemleak hook after KASAN. */
1377 kmemleak_alloc(ptr, size, 1, flags); 1398 kmemleak_alloc(ptr, size, 1, flags);
1378 return kasan_kmalloc_large(ptr, size, flags); 1399 return ptr;
1379} 1400}
1380 1401
1381static __always_inline void kfree_hook(void *x) 1402static __always_inline void kfree_hook(void *x)
@@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1641 if (page_is_pfmemalloc(page)) 1662 if (page_is_pfmemalloc(page))
1642 SetPageSlabPfmemalloc(page); 1663 SetPageSlabPfmemalloc(page);
1643 1664
1665 kasan_poison_slab(page);
1666
1644 start = page_address(page); 1667 start = page_address(page);
1645 1668
1646 if (unlikely(s->flags & SLAB_POISON)) 1669 setup_page_debug(s, start, order);
1647 memset(start, POISON_INUSE, PAGE_SIZE << order);
1648
1649 kasan_poison_slab(page);
1650 1670
1651 shuffle = shuffle_freelist(s, page); 1671 shuffle = shuffle_freelist(s, page);
1652 1672
1653 if (!shuffle) { 1673 if (!shuffle) {
1654 for_each_object_idx(p, idx, s, start, page->objects) {
1655 if (likely(idx < page->objects)) {
1656 next = p + s->size;
1657 next = setup_object(s, page, next);
1658 set_freepointer(s, p, next);
1659 } else
1660 set_freepointer(s, p, NULL);
1661 }
1662 start = fixup_red_left(s, start); 1674 start = fixup_red_left(s, start);
1663 start = setup_object(s, page, start); 1675 start = setup_object(s, page, start);
1664 page->freelist = start; 1676 page->freelist = start;
1677 for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1678 next = p + s->size;
1679 next = setup_object(s, page, next);
1680 set_freepointer(s, p, next);
1681 p = next;
1682 }
1683 set_freepointer(s, p, NULL);
1665 } 1684 }
1666 1685
1667 page->inuse = page->objects; 1686 page->inuse = page->objects;
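The slub.c freelist_ptr() hunk above strips the KASAN tag from the slot address before mixing it into the hardened free-pointer mask, so tagged and untagged views of the same slot decode the link identically. A small stand-alone sketch of that XOR obfuscation; the tag layout and constants are invented for the example, and reset_tag() only mimics kasan_reset_tag():

#include <stdint.h>
#include <stdio.h>

static const uint64_t cache_random = 0x5a5a1234abcd9876ULL; /* s->random stand-in */

/* Mimic kasan_reset_tag(): drop the top-byte tag of a 64-bit pointer. */
static uint64_t reset_tag(uint64_t addr)
{
	return addr & 0x00ffffffffffffffULL;
}

/* XOR is its own inverse, so one helper both encodes and decodes the link. */
static uint64_t freelist_ptr(uint64_t ptr, uint64_t slot_addr)
{
	return ptr ^ cache_random ^ reset_tag(slot_addr);
}

int main(void)
{
	uint64_t next = 0x0000777700001000ULL;    /* next free object */
	uint64_t slot = 0x0000777700000040ULL;    /* where the link is stored */
	uint64_t tagged_slot = 0xab00000000000000ULL | slot;  /* tagged alias */

	/* set_freepointer() analogue, called with a tagged slot pointer. */
	uint64_t stored = freelist_ptr(next, tagged_slot);

	/* get_freepointer() analogue, later called with the untagged alias.
	 * Without reset_tag() the two masks would differ and the decoded
	 * link would be garbage. */
	uint64_t decoded = freelist_ptr(stored, slot);

	printf("decoded pointer %s the original\n",
	       decoded == next ? "matches" : "does not match");
	return 0;
}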
diff --git a/mm/swap.c b/mm/swap.c
index 4929bc1be60e..4d7d37eb3c40 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu)
320{ 320{
321} 321}
322 322
323static bool need_activate_page_drain(int cpu)
324{
325 return false;
326}
327
328void activate_page(struct page *page) 323void activate_page(struct page *page)
329{ 324{
330 struct zone *zone = page_zone(page); 325 struct zone *zone = page_zone(page);
@@ -653,13 +648,15 @@ void lru_add_drain(void)
653 put_cpu(); 648 put_cpu();
654} 649}
655 650
651#ifdef CONFIG_SMP
652
653static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
654
656static void lru_add_drain_per_cpu(struct work_struct *dummy) 655static void lru_add_drain_per_cpu(struct work_struct *dummy)
657{ 656{
658 lru_add_drain(); 657 lru_add_drain();
659} 658}
660 659
661static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
662
663/* 660/*
664 * Doesn't need any cpu hotplug locking because we do rely on per-cpu 661 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
665 * kworkers being shut down before our page_alloc_cpu_dead callback is 662 * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -702,6 +699,12 @@ void lru_add_drain_all(void)
702 699
703 mutex_unlock(&lock); 700 mutex_unlock(&lock);
704} 701}
702#else
703void lru_add_drain_all(void)
704{
705 lru_add_drain();
706}
707#endif
705 708
706/** 709/**
707 * release_pages - batched put_page() 710 * release_pages - batched put_page()
diff --git a/mm/util.c b/mm/util.c
index 1ea055138043..379319b1bcfd 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len)
150{ 150{
151 void *p; 151 void *p;
152 152
153 p = kmalloc_track_caller(len, GFP_USER); 153 p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
154 if (!p) 154 if (!p)
155 return ERR_PTR(-ENOMEM); 155 return ERR_PTR(-ENOMEM);
156 156
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a714c4f800e9..e979705bbf32 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
491 delta = freeable / 2; 491 delta = freeable / 2;
492 } 492 }
493 493
494 /*
495 * Make sure we apply some minimal pressure on default priority
496 * even on small cgroups. Stale objects are not only consuming memory
497 * by themselves, but can also hold a reference to a dying cgroup,
498 * preventing it from being reclaimed. A dying cgroup with all
499 * corresponding structures like per-cpu stats and kmem caches
500 * can be really big, so it may lead to a significant waste of memory.
501 */
502 delta = max_t(unsigned long long, delta, min(freeable, batch_size));
503
504 total_scan += delta; 494 total_scan += delta;
505 if (total_scan < 0) { 495 if (total_scan < 0) {
506 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", 496 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index b85ca809e509..ffc83bebfe40 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -227,6 +227,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
227 227
228 switch (ntohs(ethhdr->h_proto)) { 228 switch (ntohs(ethhdr->h_proto)) {
229 case ETH_P_8021Q: 229 case ETH_P_8021Q:
230 if (!pskb_may_pull(skb, sizeof(*vhdr)))
231 goto dropped;
230 vhdr = vlan_eth_hdr(skb); 232 vhdr = vlan_eth_hdr(skb);
231 233
232 /* drop batman-in-batman packets to prevent loops */ 234 /* drop batman-in-batman packets to prevent loops */
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..e31e1b20f7f4 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -13,27 +13,13 @@
13#include <net/sock.h> 13#include <net/sock.h>
14#include <net/tcp.h> 14#include <net/tcp.h>
15 15
16static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, 16static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
17 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) 17 u32 *retval, u32 *time)
18{
19 u32 ret;
20
21 preempt_disable();
22 rcu_read_lock();
23 bpf_cgroup_storage_set(storage);
24 ret = BPF_PROG_RUN(prog, ctx);
25 rcu_read_unlock();
26 preempt_enable();
27
28 return ret;
29}
30
31static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
32 u32 *time)
33{ 18{
34 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; 19 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
35 enum bpf_cgroup_storage_type stype; 20 enum bpf_cgroup_storage_type stype;
36 u64 time_start, time_spent = 0; 21 u64 time_start, time_spent = 0;
22 int ret = 0;
37 u32 i; 23 u32 i;
38 24
39 for_each_cgroup_storage_type(stype) { 25 for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
48 34
49 if (!repeat) 35 if (!repeat)
50 repeat = 1; 36 repeat = 1;
37
38 rcu_read_lock();
39 preempt_disable();
51 time_start = ktime_get_ns(); 40 time_start = ktime_get_ns();
52 for (i = 0; i < repeat; i++) { 41 for (i = 0; i < repeat; i++) {
53 *ret = bpf_test_run_one(prog, ctx, storage); 42 bpf_cgroup_storage_set(storage);
43 *retval = BPF_PROG_RUN(prog, ctx);
44
45 if (signal_pending(current)) {
46 ret = -EINTR;
47 break;
48 }
49
54 if (need_resched()) { 50 if (need_resched()) {
55 if (signal_pending(current))
56 break;
57 time_spent += ktime_get_ns() - time_start; 51 time_spent += ktime_get_ns() - time_start;
52 preempt_enable();
53 rcu_read_unlock();
54
58 cond_resched(); 55 cond_resched();
56
57 rcu_read_lock();
58 preempt_disable();
59 time_start = ktime_get_ns(); 59 time_start = ktime_get_ns();
60 } 60 }
61 } 61 }
62 time_spent += ktime_get_ns() - time_start; 62 time_spent += ktime_get_ns() - time_start;
63 preempt_enable();
64 rcu_read_unlock();
65
63 do_div(time_spent, repeat); 66 do_div(time_spent, repeat);
64 *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; 67 *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
65 68
66 for_each_cgroup_storage_type(stype) 69 for_each_cgroup_storage_type(stype)
67 bpf_cgroup_storage_free(storage[stype]); 70 bpf_cgroup_storage_free(storage[stype]);
68 71
69 return 0; 72 return ret;
70} 73}
71 74
72static int bpf_test_finish(const union bpf_attr *kattr, 75static int bpf_test_finish(const union bpf_attr *kattr,
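The reworked bpf_test_run() above keeps RCU and preemption disabled across the measured iterations, pauses the clock and re-enables them only around cond_resched(), and bails out with -EINTR when a signal is pending instead of silently cutting the run short. Below is a user-space analogue of that measurement loop; run_once(), the yield interval and the SIGINT handling are stand-ins, not kernel or BPF interfaces:

#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

static volatile sig_atomic_t stop;
static void on_int(int sig) { (void)sig; stop = 1; }

static unsigned long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static unsigned int run_once(void)
{
	/* Stand-in for BPF_PROG_RUN(prog, ctx): a cheap deterministic step. */
	static unsigned int x = 1;
	x = x * 1664525u + 1013904223u;
	return x;
}

int main(void)
{
	unsigned long long start, spent = 0;
	unsigned int retval = 0, repeat = 10 * 1000 * 1000, i;
	int err = 0;

	signal(SIGINT, on_int);

	start = now_ns();
	for (i = 0; i < repeat; i++) {
		retval = run_once();

		if (stop) {                    /* signal_pending() analogue */
			err = -1;              /* the kernel returns -EINTR */
			break;
		}
		if ((i & 0xfffff) == 0 && i) { /* need_resched() analogue */
			spent += now_ns() - start;   /* stop the clock */
			sched_yield();               /* cond_resched() */
			start = now_ns();            /* restart the clock */
		}
	}
	spent += now_ns() - start;

	printf("err=%d retval=%u avg=%llu ns/run\n",
	       err, retval, spent / repeat);
	return 0;
}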
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3aeff0895669..ac92b2eb32b1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1204,14 +1204,7 @@ static void br_multicast_query_received(struct net_bridge *br,
1204 return; 1204 return;
1205 1205
1206 br_multicast_update_query_timer(br, query, max_delay); 1206 br_multicast_update_query_timer(br, query, max_delay);
1207 1207 br_multicast_mark_router(br, port);
1208 /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
1209 * the arrival port for IGMP Queries where the source address
1210 * is 0.0.0.0 should not be added to router port list.
1211 */
1212 if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
1213 saddr->proto == htons(ETH_P_IPV6))
1214 br_multicast_mark_router(br, port);
1215} 1208}
1216 1209
1217static void br_ip4_multicast_query(struct net_bridge *br, 1210static void br_ip4_multicast_query(struct net_bridge *br,
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3661cdd927f1..7e71b0df1fbc 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2058,6 +2058,8 @@ static int process_connect(struct ceph_connection *con)
2058 dout("process_connect on %p tag %d\n", con, (int)con->in_tag); 2058 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2059 2059
2060 if (con->auth) { 2060 if (con->auth) {
2061 int len = le32_to_cpu(con->in_reply.authorizer_len);
2062
2061 /* 2063 /*
2062 * Any connection that defines ->get_authorizer() 2064 * Any connection that defines ->get_authorizer()
2063 * should also define ->add_authorizer_challenge() and 2065 * should also define ->add_authorizer_challenge() and
@@ -2067,8 +2069,7 @@ static int process_connect(struct ceph_connection *con)
2067 */ 2069 */
2068 if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) { 2070 if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
2069 ret = con->ops->add_authorizer_challenge( 2071 ret = con->ops->add_authorizer_challenge(
2070 con, con->auth->authorizer_reply_buf, 2072 con, con->auth->authorizer_reply_buf, len);
2071 le32_to_cpu(con->in_reply.authorizer_len));
2072 if (ret < 0) 2073 if (ret < 0)
2073 return ret; 2074 return ret;
2074 2075
@@ -2078,10 +2079,12 @@ static int process_connect(struct ceph_connection *con)
2078 return 0; 2079 return 0;
2079 } 2080 }
2080 2081
2081 ret = con->ops->verify_authorizer_reply(con); 2082 if (len) {
2082 if (ret < 0) { 2083 ret = con->ops->verify_authorizer_reply(con);
2083 con->error_msg = "bad authorize reply"; 2084 if (ret < 0) {
2084 return ret; 2085 con->error_msg = "bad authorize reply";
2086 return ret;
2087 }
2085 } 2088 }
2086 } 2089 }
2087 2090
diff --git a/net/compat.c b/net/compat.c
index 959d1c51826d..3d348198004f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -388,8 +388,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname,
388 char __user *optval, unsigned int optlen) 388 char __user *optval, unsigned int optlen)
389{ 389{
390 int err; 390 int err;
391 struct socket *sock = sockfd_lookup(fd, &err); 391 struct socket *sock;
392
393 if (optlen > INT_MAX)
394 return -EINVAL;
392 395
396 sock = sockfd_lookup(fd, &err);
393 if (sock) { 397 if (sock) {
394 err = security_socket_setsockopt(sock, level, optname); 398 err = security_socket_setsockopt(sock, level, optname);
395 if (err) { 399 if (err) {
diff --git a/net/core/dev.c b/net/core/dev.c
index 8e276e0192a1..5d03889502eb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
8152 netdev_features_t feature; 8152 netdev_features_t feature;
8153 int feature_bit; 8153 int feature_bit;
8154 8154
8155 for_each_netdev_feature(&upper_disables, feature_bit) { 8155 for_each_netdev_feature(upper_disables, feature_bit) {
8156 feature = __NETIF_F_BIT(feature_bit); 8156 feature = __NETIF_F_BIT(feature_bit);
8157 if (!(upper->wanted_features & feature) 8157 if (!(upper->wanted_features & feature)
8158 && (features & feature)) { 8158 && (features & feature)) {
@@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
8172 netdev_features_t feature; 8172 netdev_features_t feature;
8173 int feature_bit; 8173 int feature_bit;
8174 8174
8175 for_each_netdev_feature(&upper_disables, feature_bit) { 8175 for_each_netdev_feature(upper_disables, feature_bit) {
8176 feature = __NETIF_F_BIT(feature_bit); 8176 feature = __NETIF_F_BIT(feature_bit);
8177 if (!(features & feature) && (lower->features & feature)) { 8177 if (!(features & feature) && (lower->features & feature)) {
8178 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 8178 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
diff --git a/net/core/filter.c b/net/core/filter.c
index 7a54dc11ac2d..f7d0004fc160 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2789,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2789 u32 off = skb_mac_header_len(skb); 2789 u32 off = skb_mac_header_len(skb);
2790 int ret; 2790 int ret;
2791 2791
2792 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2792 if (!skb_is_gso_tcp(skb))
2793 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2794 return -ENOTSUPP; 2793 return -ENOTSUPP;
2795 2794
2796 ret = skb_cow(skb, len_diff); 2795 ret = skb_cow(skb, len_diff);
@@ -2831,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2831 u32 off = skb_mac_header_len(skb); 2830 u32 off = skb_mac_header_len(skb);
2832 int ret; 2831 int ret;
2833 2832
2834 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2833 if (!skb_is_gso_tcp(skb))
2835 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2836 return -ENOTSUPP; 2834 return -ENOTSUPP;
2837 2835
2838 ret = skb_unclone(skb, GFP_ATOMIC); 2836 ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2957,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2957 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2955 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2958 int ret; 2956 int ret;
2959 2957
2960 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2958 if (!skb_is_gso_tcp(skb))
2961 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2962 return -ENOTSUPP; 2959 return -ENOTSUPP;
2963 2960
2964 ret = skb_cow(skb, len_diff); 2961 ret = skb_cow(skb, len_diff);
@@ -2987,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2987 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2984 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2988 int ret; 2985 int ret;
2989 2986
2990 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2987 if (!skb_is_gso_tcp(skb))
2991 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2992 return -ENOTSUPP; 2988 return -ENOTSUPP;
2993 2989
2994 ret = skb_unclone(skb, GFP_ATOMIC); 2990 ret = skb_unclone(skb, GFP_ATOMIC);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 26d848484912..2415d9cb9b89 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
356 */ 356 */
357void *netdev_alloc_frag(unsigned int fragsz) 357void *netdev_alloc_frag(unsigned int fragsz)
358{ 358{
359 fragsz = SKB_DATA_ALIGN(fragsz);
360
359 return __netdev_alloc_frag(fragsz, GFP_ATOMIC); 361 return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
360} 362}
361EXPORT_SYMBOL(netdev_alloc_frag); 363EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
369 371
370void *napi_alloc_frag(unsigned int fragsz) 372void *napi_alloc_frag(unsigned int fragsz)
371{ 373{
374 fragsz = SKB_DATA_ALIGN(fragsz);
375
372 return __napi_alloc_frag(fragsz, GFP_ATOMIC); 376 return __napi_alloc_frag(fragsz, GFP_ATOMIC);
373} 377}
374EXPORT_SYMBOL(napi_alloc_frag); 378EXPORT_SYMBOL(napi_alloc_frag);
diff --git a/net/core/sock.c b/net/core/sock.c
index 6aa2e7e0b4fb..bc3512f230a3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2380,7 +2380,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2380 } 2380 }
2381 2381
2382 if (sk_has_memory_pressure(sk)) { 2382 if (sk_has_memory_pressure(sk)) {
2383 int alloc; 2383 u64 alloc;
2384 2384
2385 if (!sk_under_memory_pressure(sk)) 2385 if (!sk_under_memory_pressure(sk))
2386 return 1; 2386 return 1;
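The one-liner in net/core/sock.c above widens 'alloc' to u64 because the memory-pressure check multiplies a count of allocated sockets by a per-socket page estimate, and that product can exceed what a 32-bit int holds. A tiny demonstration of the failure mode with invented numbers (the limit and socket figures are not taken from the kernel):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	long limit = 4L * 1024 * 1024;   /* pages allowed under pressure (invented) */
	int sockets = 90000;             /* allocated sockets (invented) */
	int pages = 30000;               /* worst-case pages per socket (invented) */

	/* 2,700,000,000 does not fit in a 32-bit int; computed with unsigned
	 * wrap-around and converted back, it typically comes out negative. */
	int wrapped = (int)((unsigned int)sockets * (unsigned int)pages);

	/* The 64-bit product keeps the true magnitude. */
	uint64_t wide = (uint64_t)sockets * (uint64_t)pages;

	/* The pressure check is of the form "limit > allocated", i.e. "still
	 * under the limit": the wrapped product makes it pass wrongly. */
	printf("limit > 32-bit product? %s\n",
	       limit > wrapped ? "yes (overflow hides the excess)" : "no");
	printf("limit > 64-bit product? %s\n",
	       (uint64_t)limit > wide ? "yes" : "no (excess detected)");
	return 0;
}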
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 2d7e01b23572..2a2a878b5ce3 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
69 69
70int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) 70int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
71{ 71{
72 u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING;
73 struct dsa_switch *ds = dp->ds; 72 struct dsa_switch *ds = dp->ds;
74 int port = dp->index; 73 int port = dp->index;
75 int err; 74 int err;
@@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
80 return err; 79 return err;
81 } 80 }
82 81
83 dsa_port_set_state_now(dp, stp_state); 82 if (!dp->bridge_dev)
83 dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
84 84
85 return 0; 85 return 0;
86} 86}
@@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy)
90 struct dsa_switch *ds = dp->ds; 90 struct dsa_switch *ds = dp->ds;
91 int port = dp->index; 91 int port = dp->index;
92 92
93 dsa_port_set_state_now(dp, BR_STATE_DISABLED); 93 if (!dp->bridge_dev)
94 dsa_port_set_state_now(dp, BR_STATE_DISABLED);
94 95
95 if (ds->ops->port_disable) 96 if (ds->ops->port_disable)
96 ds->ops->port_disable(ds, port, phy); 97 ds->ops->port_disable(ds, port, phy);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 5459f41fc26f..10e809b296ec 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
328 skb->len += tailen; 328 skb->len += tailen;
329 skb->data_len += tailen; 329 skb->data_len += tailen;
330 skb->truesize += tailen; 330 skb->truesize += tailen;
331 if (sk) 331 if (sk && sk_fullsock(sk))
332 refcount_add(tailen, &sk->sk_wmem_alloc); 332 refcount_add(tailen, &sk->sk_wmem_alloc);
333 333
334 goto out; 334 goto out;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 1a4e9ff02762..5731670c560b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
108 + nla_total_size(1) /* INET_DIAG_TOS */ 108 + nla_total_size(1) /* INET_DIAG_TOS */
109 + nla_total_size(1) /* INET_DIAG_TCLASS */ 109 + nla_total_size(1) /* INET_DIAG_TCLASS */
110 + nla_total_size(4) /* INET_DIAG_MARK */ 110 + nla_total_size(4) /* INET_DIAG_MARK */
111 + nla_total_size(4) /* INET_DIAG_CLASS_ID */
111 + nla_total_size(sizeof(struct inet_diag_meminfo)) 112 + nla_total_size(sizeof(struct inet_diag_meminfo))
112 + nla_total_size(sizeof(struct inet_diag_msg)) 113 + nla_total_size(sizeof(struct inet_diag_msg))
113 + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) 114 + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
287 goto errout; 288 goto errout;
288 } 289 }
289 290
290 if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) { 291 if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
292 ext & (1 << (INET_DIAG_TCLASS - 1))) {
291 u32 classid = 0; 293 u32 classid = 0;
292 294
293#ifdef CONFIG_SOCK_CGROUP_DATA 295#ifdef CONFIG_SOCK_CGROUP_DATA
294 classid = sock_cgroup_classid(&sk->sk_cgrp_data); 296 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
295#endif 297#endif
298 /* Fallback to socket priority if class id isn't set.
299 * Classful qdiscs use it as direct reference to class.
300 * For cgroup2 classid is always zero.
301 */
302 if (!classid)
303 classid = sk->sk_priority;
296 304
297 if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) 305 if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
298 goto errout; 306 goto errout;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d757b9642d0d..be778599bfed 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
216 atomic_set(&p->rid, 0); 216 atomic_set(&p->rid, 0);
217 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; 217 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
218 p->rate_tokens = 0; 218 p->rate_tokens = 0;
219 p->n_redirects = 0;
219 /* 60*HZ is arbitrary, but chosen enough high so that the first 220 /* 60*HZ is arbitrary, but chosen enough high so that the first
220 * calculation of tokens is at its maximum. 221 * calculation of tokens is at its maximum.
221 */ 222 */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 3978f807fa8b..6ae89f2b541b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1457,9 +1457,23 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1457 struct ip_tunnel_parm *p = &t->parms; 1457 struct ip_tunnel_parm *p = &t->parms;
1458 __be16 o_flags = p->o_flags; 1458 __be16 o_flags = p->o_flags;
1459 1459
1460 if ((t->erspan_ver == 1 || t->erspan_ver == 2) && 1460 if (t->erspan_ver == 1 || t->erspan_ver == 2) {
1461 !t->collect_md) 1461 if (!t->collect_md)
1462 o_flags |= TUNNEL_KEY; 1462 o_flags |= TUNNEL_KEY;
1463
1464 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1465 goto nla_put_failure;
1466
1467 if (t->erspan_ver == 1) {
1468 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1469 goto nla_put_failure;
1470 } else {
1471 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1472 goto nla_put_failure;
1473 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1474 goto nla_put_failure;
1475 }
1476 }
1463 1477
1464 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 1478 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1465 nla_put_be16(skb, IFLA_GRE_IFLAGS, 1479 nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -1495,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1495 goto nla_put_failure; 1509 goto nla_put_failure;
1496 } 1510 }
1497 1511
1498 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1499 goto nla_put_failure;
1500
1501 if (t->erspan_ver == 1) {
1502 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1503 goto nla_put_failure;
1504 } else if (t->erspan_ver == 2) {
1505 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1506 goto nla_put_failure;
1507 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1508 goto nla_put_failure;
1509 }
1510
1511 return 0; 1512 return 0;
1512 1513
1513nla_put_failure: 1514nla_put_failure:
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 2687db015b6f..fa2ba7c500e4 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -215,6 +215,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
215 215
216 /* Change outer to look like the reply to an incoming packet */ 216 /* Change outer to look like the reply to an incoming packet */
217 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 217 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
218 target.dst.protonum = IPPROTO_ICMP;
218 if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) 219 if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
219 return 0; 220 return 0;
220 221
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
index a0aa13bcabda..0a8a60c1bf9a 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
105int snmp_version(void *context, size_t hdrlen, unsigned char tag, 105int snmp_version(void *context, size_t hdrlen, unsigned char tag,
106 const void *data, size_t datalen) 106 const void *data, size_t datalen)
107{ 107{
108 if (datalen != 1)
109 return -EINVAL;
108 if (*(unsigned char *)data > 1) 110 if (*(unsigned char *)data > 1)
109 return -ENOTSUPP; 111 return -ENOTSUPP;
110 return 1; 112 return 1;
@@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
114 const void *data, size_t datalen) 116 const void *data, size_t datalen)
115{ 117{
116 struct snmp_ctx *ctx = (struct snmp_ctx *)context; 118 struct snmp_ctx *ctx = (struct snmp_ctx *)context;
117 __be32 *pdata = (__be32 *)data; 119 __be32 *pdata;
118 120
121 if (datalen != 4)
122 return -EINVAL;
123 pdata = (__be32 *)data;
119 if (*pdata == ctx->from) { 124 if (*pdata == ctx->from) {
120 pr_debug("%s: %pI4 to %pI4\n", __func__, 125 pr_debug("%s: %pI4 to %pI4\n", __func__,
121 (void *)&ctx->from, (void *)&ctx->to); 126 (void *)&ctx->from, (void *)&ctx->to);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ce92f73cf104..5163b64f8fb3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
887 /* No redirected packets during ip_rt_redirect_silence; 887 /* No redirected packets during ip_rt_redirect_silence;
888 * reset the algorithm. 888 * reset the algorithm.
889 */ 889 */
890 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) 890 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
891 peer->rate_tokens = 0; 891 peer->rate_tokens = 0;
892 peer->n_redirects = 0;
893 }
892 894
893 /* Too many ignored redirects; do not send anything 895 /* Too many ignored redirects; do not send anything
894 * set dst.rate_last to the last seen redirected packet. 896 * set dst.rate_last to the last seen redirected packet.
895 */ 897 */
896 if (peer->rate_tokens >= ip_rt_redirect_number) { 898 if (peer->n_redirects >= ip_rt_redirect_number) {
897 peer->rate_last = jiffies; 899 peer->rate_last = jiffies;
898 goto out_put_peer; 900 goto out_put_peer;
899 } 901 }
@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
910 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); 912 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
911 peer->rate_last = jiffies; 913 peer->rate_last = jiffies;
912 ++peer->rate_tokens; 914 ++peer->rate_tokens;
915 ++peer->n_redirects;
913#ifdef CONFIG_IP_ROUTE_VERBOSE 916#ifdef CONFIG_IP_ROUTE_VERBOSE
914 if (log_martians && 917 if (log_martians &&
915 peer->rate_tokens == ip_rt_redirect_number) 918 peer->rate_tokens == ip_rt_redirect_number)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2079145a3b7c..cf3c5095c10e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk)
2528 sk_mem_reclaim(sk); 2528 sk_mem_reclaim(sk);
2529 tcp_clear_all_retrans_hints(tcp_sk(sk)); 2529 tcp_clear_all_retrans_hints(tcp_sk(sk));
2530 tcp_sk(sk)->packets_out = 0; 2530 tcp_sk(sk)->packets_out = 0;
2531 inet_csk(sk)->icsk_backoff = 0;
2531} 2532}
2532 2533
2533int tcp_disconnect(struct sock *sk, int flags) 2534int tcp_disconnect(struct sock *sk, int flags)
@@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags)
2576 tp->write_seq += tp->max_window + 2; 2577 tp->write_seq += tp->max_window + 2;
2577 if (tp->write_seq == 0) 2578 if (tp->write_seq == 0)
2578 tp->write_seq = 1; 2579 tp->write_seq = 1;
2579 icsk->icsk_backoff = 0;
2580 tp->snd_cwnd = 2; 2580 tp->snd_cwnd = 2;
2581 icsk->icsk_probes_out = 0; 2581 icsk->icsk_probes_out = 0;
2582 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2582 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index efc6fef692ff..ec3cea9d6828 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
536 if (sock_owned_by_user(sk)) 536 if (sock_owned_by_user(sk))
537 break; 537 break;
538 538
539 skb = tcp_rtx_queue_head(sk);
540 if (WARN_ON_ONCE(!skb))
541 break;
542
539 icsk->icsk_backoff--; 543 icsk->icsk_backoff--;
540 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : 544 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
541 TCP_TIMEOUT_INIT; 545 TCP_TIMEOUT_INIT;
542 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); 546 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
543 547
544 skb = tcp_rtx_queue_head(sk);
545 548
546 tcp_mstamp_refresh(tp); 549 tcp_mstamp_refresh(tp);
547 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); 550 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 730bc44dbad9..ccc78f3a4b60 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2347 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ 2347 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2348 skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; 2348 skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2349 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); 2349 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2350 tcp_init_tso_segs(skb, mss_now);
2350 goto repair; /* Skip network transmission */ 2351 goto repair; /* Skip network transmission */
2351 } 2352 }
2352 2353
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5c3cd5d84a6f..372fdc5381a9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
562 562
563 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 563 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
564 int (*handler)(struct sk_buff *skb, u32 info); 564 int (*handler)(struct sk_buff *skb, u32 info);
565 const struct ip_tunnel_encap_ops *encap;
565 566
566 if (!iptun_encaps[i]) 567 encap = rcu_dereference(iptun_encaps[i]);
568 if (!encap)
567 continue; 569 continue;
568 handler = rcu_dereference(iptun_encaps[i]->err_handler); 570 handler = encap->err_handler;
569 if (handler && !handler(skb, info)) 571 if (handler && !handler(skb, info))
570 return 0; 572 return 0;
571 } 573 }
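The udp.c hunk above dereferences the RCU-protected iptun_encaps[] slot once and then works only on that snapshot, rather than NULL-checking the raw slot and re-reading it for the ->err_handler access, where a concurrent update could be observed in between. Here is a user-space analogue of that load-once pattern using a C11 acquire load; struct ops, dispatch() and handler_impl() are invented for the example:

#include <stdatomic.h>
#include <stdio.h>

struct ops {
	int (*err_handler)(int info);
};

/* Handler that reports success (0), like the err_handler contract above. */
static int handler_impl(int info)
{
	printf("handled info=%d\n", info);
	return 0;
}

static struct ops real_ops = { .err_handler = handler_impl };

#define MAX_OPS 4
static _Atomic(struct ops *) ops_table[MAX_OPS];   /* registered at runtime */

static int dispatch(int info)
{
	for (int i = 0; i < MAX_OPS; i++) {
		/* Load the slot once; every later use goes through this
		 * snapshot, so an unregistration cannot slip in between the
		 * NULL check and the member access. */
		struct ops *o = atomic_load_explicit(&ops_table[i],
						     memory_order_acquire);
		if (!o)
			continue;
		if (o->err_handler && !o->err_handler(info))
			return 0;
	}
	return -1;   /* no handler claimed it */
}

int main(void)
{
	atomic_store(&ops_table[2], &real_ops);   /* register one handler */
	printf("dispatch() -> %d\n", dispatch(7));
	return 0;
}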
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 84c358804355..72ffd3d760ff 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1165 list_for_each_entry(ifa, &idev->addr_list, if_list) { 1165 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1166 if (ifa == ifp) 1166 if (ifa == ifp)
1167 continue; 1167 continue;
1168 if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr, 1168 if (ifa->prefix_len != ifp->prefix_len ||
1169 !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1169 ifp->prefix_len)) 1170 ifp->prefix_len))
1170 continue; 1171 continue;
1171 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) 1172 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5afe9f83374d..239d4a65ad6e 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
296 skb->len += tailen; 296 skb->len += tailen;
297 skb->data_len += tailen; 297 skb->data_len += tailen;
298 skb->truesize += tailen; 298 skb->truesize += tailen;
299 if (sk) 299 if (sk && sk_fullsock(sk))
300 refcount_add(tailen, &sk->sk_wmem_alloc); 300 refcount_add(tailen, &sk->sk_wmem_alloc);
301 301
302 goto out; 302 goto out;
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index b858bd5280bf..867474abe269 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
72 72
73static int gue6_err_proto_handler(int proto, struct sk_buff *skb, 73static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
74 struct inet6_skb_parm *opt, 74 struct inet6_skb_parm *opt,
75 u8 type, u8 code, int offset, u32 info) 75 u8 type, u8 code, int offset, __be32 info)
76{ 76{
77 const struct inet6_protocol *ipprot; 77 const struct inet6_protocol *ipprot;
78 78
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 801a9a0c217e..26f25b6e2833 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1719,6 +1719,27 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1719 return 0; 1719 return 0;
1720} 1720}
1721 1721
1722static void ip6erspan_set_version(struct nlattr *data[],
1723 struct __ip6_tnl_parm *parms)
1724{
1725 if (!data)
1726 return;
1727
1728 parms->erspan_ver = 1;
1729 if (data[IFLA_GRE_ERSPAN_VER])
1730 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1731
1732 if (parms->erspan_ver == 1) {
1733 if (data[IFLA_GRE_ERSPAN_INDEX])
1734 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1735 } else if (parms->erspan_ver == 2) {
1736 if (data[IFLA_GRE_ERSPAN_DIR])
1737 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1738 if (data[IFLA_GRE_ERSPAN_HWID])
1739 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1740 }
1741}
1742
1722static void ip6gre_netlink_parms(struct nlattr *data[], 1743static void ip6gre_netlink_parms(struct nlattr *data[],
1723 struct __ip6_tnl_parm *parms) 1744 struct __ip6_tnl_parm *parms)
1724{ 1745{
@@ -1767,20 +1788,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
1767 1788
1768 if (data[IFLA_GRE_COLLECT_METADATA]) 1789 if (data[IFLA_GRE_COLLECT_METADATA])
1769 parms->collect_md = true; 1790 parms->collect_md = true;
1770
1771 parms->erspan_ver = 1;
1772 if (data[IFLA_GRE_ERSPAN_VER])
1773 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1774
1775 if (parms->erspan_ver == 1) {
1776 if (data[IFLA_GRE_ERSPAN_INDEX])
1777 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1778 } else if (parms->erspan_ver == 2) {
1779 if (data[IFLA_GRE_ERSPAN_DIR])
1780 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1781 if (data[IFLA_GRE_ERSPAN_HWID])
1782 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1783 }
1784} 1791}
1785 1792
1786static int ip6gre_tap_init(struct net_device *dev) 1793static int ip6gre_tap_init(struct net_device *dev)
@@ -2100,9 +2107,23 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2100 struct __ip6_tnl_parm *p = &t->parms; 2107 struct __ip6_tnl_parm *p = &t->parms;
2101 __be16 o_flags = p->o_flags; 2108 __be16 o_flags = p->o_flags;
2102 2109
2103 if ((p->erspan_ver == 1 || p->erspan_ver == 2) && 2110 if (p->erspan_ver == 1 || p->erspan_ver == 2) {
2104 !p->collect_md) 2111 if (!p->collect_md)
2105 o_flags |= TUNNEL_KEY; 2112 o_flags |= TUNNEL_KEY;
2113
2114 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
2115 goto nla_put_failure;
2116
2117 if (p->erspan_ver == 1) {
2118 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2119 goto nla_put_failure;
2120 } else {
2121 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
2122 goto nla_put_failure;
2123 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
2124 goto nla_put_failure;
2125 }
2126 }
2106 2127
2107 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 2128 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2108 nla_put_be16(skb, IFLA_GRE_IFLAGS, 2129 nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -2117,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2117 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || 2138 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
2118 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || 2139 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
2119 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) || 2140 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
2120 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) || 2141 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
2121 nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2122 goto nla_put_failure; 2142 goto nla_put_failure;
2123 2143
2124 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, 2144 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -2136,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2136 goto nla_put_failure; 2156 goto nla_put_failure;
2137 } 2157 }
2138 2158
2139 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
2140 goto nla_put_failure;
2141
2142 if (p->erspan_ver == 1) {
2143 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2144 goto nla_put_failure;
2145 } else if (p->erspan_ver == 2) {
2146 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
2147 goto nla_put_failure;
2148 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
2149 goto nla_put_failure;
2150 }
2151
2152 return 0; 2159 return 0;
2153 2160
2154nla_put_failure: 2161nla_put_failure:
@@ -2203,6 +2210,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
2203 int err; 2210 int err;
2204 2211
2205 ip6gre_netlink_parms(data, &nt->parms); 2212 ip6gre_netlink_parms(data, &nt->parms);
2213 ip6erspan_set_version(data, &nt->parms);
2206 ign = net_generic(net, ip6gre_net_id); 2214 ign = net_generic(net, ip6gre_net_id);
2207 2215
2208 if (nt->parms.collect_md) { 2216 if (nt->parms.collect_md) {
@@ -2248,6 +2256,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
2248 if (IS_ERR(t)) 2256 if (IS_ERR(t))
2249 return PTR_ERR(t); 2257 return PTR_ERR(t);
2250 2258
2259 ip6erspan_set_version(data, &p);
2251 ip6gre_tunnel_unlink_md(ign, t); 2260 ip6gre_tunnel_unlink_md(ign, t);
2252 ip6gre_tunnel_unlink(ign, t); 2261 ip6gre_tunnel_unlink(ign, t);
2253 ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); 2262 ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 23022447eb49..7a41ee3c11b4 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -226,6 +226,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
226 } 226 }
227 227
228 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 228 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
229 target.dst.protonum = IPPROTO_ICMPV6;
229 if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) 230 if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
230 return 0; 231 return 0;
231 232
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 964491cf3672..ce15dc4ccbfa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
1274static void rt6_remove_exception(struct rt6_exception_bucket *bucket, 1274static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1275 struct rt6_exception *rt6_ex) 1275 struct rt6_exception *rt6_ex)
1276{ 1276{
1277 struct fib6_info *from;
1277 struct net *net; 1278 struct net *net;
1278 1279
1279 if (!bucket || !rt6_ex) 1280 if (!bucket || !rt6_ex)
1280 return; 1281 return;
1281 1282
1282 net = dev_net(rt6_ex->rt6i->dst.dev); 1283 net = dev_net(rt6_ex->rt6i->dst.dev);
1284 net->ipv6.rt6_stats->fib_rt_cache--;
1285
1286 /* purge completely the exception to allow releasing the held resources:
1287 * some [sk] cache may keep the dst around for unlimited time
1288 */
1289 from = rcu_dereference_protected(rt6_ex->rt6i->from,
1290 lockdep_is_held(&rt6_exception_lock));
1291 rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
1292 fib6_info_release(from);
1293 dst_dev_put(&rt6_ex->rt6i->dst);
1294
1283 hlist_del_rcu(&rt6_ex->hlist); 1295 hlist_del_rcu(&rt6_ex->hlist);
1284 dst_release(&rt6_ex->rt6i->dst); 1296 dst_release(&rt6_ex->rt6i->dst);
1285 kfree_rcu(rt6_ex, rcu); 1297 kfree_rcu(rt6_ex, rcu);
1286 WARN_ON_ONCE(!bucket->depth); 1298 WARN_ON_ONCE(!bucket->depth);
1287 bucket->depth--; 1299 bucket->depth--;
1288 net->ipv6.rt6_stats->fib_rt_cache--;
1289} 1300}
1290 1301
1291/* Remove oldest rt6_ex in bucket and free the memory 1302/* Remove oldest rt6_ex in bucket and free the memory
@@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt)
1599static void rt6_update_exception_stamp_rt(struct rt6_info *rt) 1610static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1600{ 1611{
1601 struct rt6_exception_bucket *bucket; 1612 struct rt6_exception_bucket *bucket;
1602 struct fib6_info *from = rt->from;
1603 struct in6_addr *src_key = NULL; 1613 struct in6_addr *src_key = NULL;
1604 struct rt6_exception *rt6_ex; 1614 struct rt6_exception *rt6_ex;
1605 1615 struct fib6_info *from;
1606 if (!from ||
1607 !(rt->rt6i_flags & RTF_CACHE))
1608 return;
1609 1616
1610 rcu_read_lock(); 1617 rcu_read_lock();
1618 from = rcu_dereference(rt->from);
1619 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1620 goto unlock;
1621
1611 bucket = rcu_dereference(from->rt6i_exception_bucket); 1622 bucket = rcu_dereference(from->rt6i_exception_bucket);
1612 1623
1613#ifdef CONFIG_IPV6_SUBTREES 1624#ifdef CONFIG_IPV6_SUBTREES
@@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1626 if (rt6_ex) 1637 if (rt6_ex)
1627 rt6_ex->stamp = jiffies; 1638 rt6_ex->stamp = jiffies;
1628 1639
1640unlock:
1629 rcu_read_unlock(); 1641 rcu_read_unlock();
1630} 1642}
1631 1643
@@ -2742,20 +2754,24 @@ static int ip6_route_check_nh_onlink(struct net *net,
2742 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; 2754 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
2743 const struct in6_addr *gw_addr = &cfg->fc_gateway; 2755 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2744 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT; 2756 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
2757 struct fib6_info *from;
2745 struct rt6_info *grt; 2758 struct rt6_info *grt;
2746 int err; 2759 int err;
2747 2760
2748 err = 0; 2761 err = 0;
2749 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0); 2762 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
2750 if (grt) { 2763 if (grt) {
2764 rcu_read_lock();
2765 from = rcu_dereference(grt->from);
2751 if (!grt->dst.error && 2766 if (!grt->dst.error &&
2752 /* ignore match if it is the default route */ 2767 /* ignore match if it is the default route */
2753 grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) && 2768 from && !ipv6_addr_any(&from->fib6_dst.addr) &&
2754 (grt->rt6i_flags & flags || dev != grt->dst.dev)) { 2769 (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
2755 NL_SET_ERR_MSG(extack, 2770 NL_SET_ERR_MSG(extack,
2756 "Nexthop has invalid gateway or device mismatch"); 2771 "Nexthop has invalid gateway or device mismatch");
2757 err = -EINVAL; 2772 err = -EINVAL;
2758 } 2773 }
2774 rcu_read_unlock();
2759 2775
2760 ip6_rt_put(grt); 2776 ip6_rt_put(grt);
2761 } 2777 }
@@ -4649,7 +4665,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4649 table = rt->fib6_table->tb6_id; 4665 table = rt->fib6_table->tb6_id;
4650 else 4666 else
4651 table = RT6_TABLE_UNSPEC; 4667 table = RT6_TABLE_UNSPEC;
4652 rtm->rtm_table = table; 4668 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
4653 if (nla_put_u32(skb, RTA_TABLE, table)) 4669 if (nla_put_u32(skb, RTA_TABLE, table))
4654 goto nla_put_failure; 4670 goto nla_put_failure;
4655 4671
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 8d0ba757a46c..9b2f272ca164 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
221 rcu_read_unlock(); 221 rcu_read_unlock();
222 222
223 genlmsg_end(msg, hdr); 223 genlmsg_end(msg, hdr);
224 genlmsg_reply(msg, info); 224 return genlmsg_reply(msg, info);
225
226 return 0;
227 225
228nla_put_failure: 226nla_put_failure:
229 rcu_read_unlock(); 227 rcu_read_unlock();
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 2596ffdeebea..b444483cdb2b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
288 int peeked, peeking, off; 288 int peeked, peeking, off;
289 int err; 289 int err;
290 int is_udplite = IS_UDPLITE(sk); 290 int is_udplite = IS_UDPLITE(sk);
291 struct udp_mib __percpu *mib;
291 bool checksum_valid = false; 292 bool checksum_valid = false;
292 struct udp_mib *mib;
293 int is_udp4; 293 int is_udp4;
294 294
295 if (flags & MSG_ERRQUEUE) 295 if (flags & MSG_ERRQUEUE)
@@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable);
420 */ 420 */
421static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, 421static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
422 struct inet6_skb_parm *opt, 422 struct inet6_skb_parm *opt,
423 u8 type, u8 code, int offset, u32 info) 423 u8 type, u8 code, int offset, __be32 info)
424{ 424{
425 int i; 425 int i;
426 426
427 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 427 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
428 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 428 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
429 u8 type, u8 code, int offset, u32 info); 429 u8 type, u8 code, int offset, __be32 info);
430 const struct ip6_tnl_encap_ops *encap;
430 431
431 if (!ip6tun_encaps[i]) 432 encap = rcu_dereference(ip6tun_encaps[i]);
433 if (!encap)
432 continue; 434 continue;
433 handler = rcu_dereference(ip6tun_encaps[i]->err_handler); 435 handler = encap->err_handler;
434 if (handler && !handler(skb, opt, type, code, offset, info)) 436 if (handler && !handler(skb, opt, type, code, offset, info))
435 return 0; 437 return 0;
436 } 438 }
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f5b4febeaa25..bc65db782bfb 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
344 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); 344 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
345 unsigned int i; 345 unsigned int i;
346 346
347 xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
348 xfrm_flush_gc(); 347 xfrm_flush_gc();
348 xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
349 349
350 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) 350 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
351 WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); 351 WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 655c787f9d54..5651c29cb5bd 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
196 return 0; 196 return 0;
197} 197}
198 198
199static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, 199static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
200 gfp_t allocation, struct sock *sk) 200 struct sock *sk)
201{ 201{
202 int err = -ENOBUFS; 202 int err = -ENOBUFS;
203 203
204 sock_hold(sk); 204 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
205 if (*skb2 == NULL) { 205 return err;
206 if (refcount_read(&skb->users) != 1) { 206
207 *skb2 = skb_clone(skb, allocation); 207 skb = skb_clone(skb, allocation);
208 } else { 208
209 *skb2 = skb; 209 if (skb) {
210 refcount_inc(&skb->users); 210 skb_set_owner_r(skb, sk);
211 } 211 skb_queue_tail(&sk->sk_receive_queue, skb);
212 } 212 sk->sk_data_ready(sk);
213 if (*skb2 != NULL) { 213 err = 0;
214 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
215 skb_set_owner_r(*skb2, sk);
216 skb_queue_tail(&sk->sk_receive_queue, *skb2);
217 sk->sk_data_ready(sk);
218 *skb2 = NULL;
219 err = 0;
220 }
221 } 214 }
222 sock_put(sk);
223 return err; 215 return err;
224} 216}
225 217
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
234{ 226{
235 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 227 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
236 struct sock *sk; 228 struct sock *sk;
237 struct sk_buff *skb2 = NULL;
238 int err = -ESRCH; 229 int err = -ESRCH;
239 230
240 /* XXX Do we need something like netlink_overrun? I think 231 /* XXX Do we need something like netlink_overrun? I think
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
253 * socket. 244 * socket.
254 */ 245 */
255 if (pfk->promisc) 246 if (pfk->promisc)
256 pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); 247 pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
257 248
258 /* the exact target will be processed later */ 249 /* the exact target will be processed later */
259 if (sk == one_sk) 250 if (sk == one_sk)
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
268 continue; 259 continue;
269 } 260 }
270 261
271 err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); 262 err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
272 263
273 /* Error is cleared after successful sending to at least one 264 /* Error is cleared after successful sending to at least one
274 * registered KM */ 265 * registered KM */
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
278 rcu_read_unlock(); 269 rcu_read_unlock();
279 270
280 if (one_sk != NULL) 271 if (one_sk != NULL)
281 err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); 272 err = pfkey_broadcast_one(skb, allocation, one_sk);
282 273
283 kfree_skb(skb2);
284 kfree_skb(skb); 274 kfree_skb(skb);
285 return err; 275 return err;
286} 276}
@@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
1783 if (proto == 0) 1773 if (proto == 0)
1784 return -EINVAL; 1774 return -EINVAL;
1785 1775
1786 err = xfrm_state_flush(net, proto, true); 1776 err = xfrm_state_flush(net, proto, true, false);
1787 err2 = unicast_flush_resp(sk, hdr); 1777 err2 = unicast_flush_resp(sk, hdr);
1788 if (err || err2) { 1778 if (err || err2) {
1789 if (err == -ESRCH) /* empty table - go quietly */ 1779 if (err == -ESRCH) /* empty table - go quietly */
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 69e831bc317b..54821fb1a960 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -8,7 +8,7 @@
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2010, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * Copyright(c) 2015-2017 Intel Deutschland GmbH 10 * Copyright(c) 2015-2017 Intel Deutschland GmbH
11 * Copyright (C) 2018 Intel Corporation 11 * Copyright (C) 2018 - 2019 Intel Corporation
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as 14 * it under the terms of the GNU General Public License version 2 as
@@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
366 366
367 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); 367 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
368 368
369 ieee80211_agg_stop_txq(sta, tid);
370
369 spin_unlock_bh(&sta->lock); 371 spin_unlock_bh(&sta->lock);
370 372
371 ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", 373 ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2493c74c2d37..96496b2c1670 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
941 BSS_CHANGED_P2P_PS | 941 BSS_CHANGED_P2P_PS |
942 BSS_CHANGED_TXPOWER; 942 BSS_CHANGED_TXPOWER;
943 int err; 943 int err;
944 int prev_beacon_int;
944 945
945 old = sdata_dereference(sdata->u.ap.beacon, sdata); 946 old = sdata_dereference(sdata->u.ap.beacon, sdata);
946 if (old) 947 if (old)
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
963 964
964 sdata->needed_rx_chains = sdata->local->rx_chains; 965 sdata->needed_rx_chains = sdata->local->rx_chains;
965 966
967 prev_beacon_int = sdata->vif.bss_conf.beacon_int;
966 sdata->vif.bss_conf.beacon_int = params->beacon_interval; 968 sdata->vif.bss_conf.beacon_int = params->beacon_interval;
967 969
968 if (params->he_cap) 970 if (params->he_cap)
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
974 if (!err) 976 if (!err)
975 ieee80211_vif_copy_chanctx_to_vlans(sdata, false); 977 ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
976 mutex_unlock(&local->mtx); 978 mutex_unlock(&local->mtx);
977 if (err) 979 if (err) {
980 sdata->vif.bss_conf.beacon_int = prev_beacon_int;
978 return err; 981 return err;
982 }
979 983
980 /* 984 /*
981 * Apply control port protocol, this allows us to 985 * Apply control port protocol, this allows us to
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 87a729926734..977dea436ee8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
615 * We need a bit of data queued to build aggregates properly, so 615 * We need a bit of data queued to build aggregates properly, so
616 * instruct the TCP stack to allow more than a single ms of data 616 * instruct the TCP stack to allow more than a single ms of data
617 * to be queued in the stack. The value is a bit-shift of 1 617 * to be queued in the stack. The value is a bit-shift of 1
618 * second, so 8 is ~4ms of queued data. Only affects local TCP 618 * second, so 7 is ~8ms of queued data. Only affects local TCP
619 * sockets. 619 * sockets.
620 * This is the default, anyhow - drivers may need to override it 620 * This is the default, anyhow - drivers may need to override it
621 * for local reasons (longer buffers, longer completion time, or 621 * for local reasons (longer buffers, longer completion time, or
622 * similar). 622 * similar).
623 */ 623 */
624 local->hw.tx_sk_pacing_shift = 8; 624 local->hw.tx_sk_pacing_shift = 7;
625 625
626 /* set up some defaults */ 626 /* set up some defaults */
627 local->hw.queues = 1; 627 local->hw.queues = 1;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index cad6592c52a1..2ec7011a4d07 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
70 * @dst: mesh path destination mac address 70 * @dst: mesh path destination mac address
71 * @mpp: mesh proxy mac address 71 * @mpp: mesh proxy mac address
72 * @rhash: rhashtable list pointer 72 * @rhash: rhashtable list pointer
 73 * @walk_list: linked list containing all mesh_path objects
73 * @gate_list: list pointer for known gates list 74 * @gate_list: list pointer for known gates list
74 * @sdata: mesh subif 75 * @sdata: mesh subif
75 * @next_hop: mesh neighbor to which frames for this destination will be 76 * @next_hop: mesh neighbor to which frames for this destination will be
@@ -105,6 +106,7 @@ struct mesh_path {
105 u8 dst[ETH_ALEN]; 106 u8 dst[ETH_ALEN];
106 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ 107 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
107 struct rhash_head rhash; 108 struct rhash_head rhash;
109 struct hlist_node walk_list;
108 struct hlist_node gate_list; 110 struct hlist_node gate_list;
109 struct ieee80211_sub_if_data *sdata; 111 struct ieee80211_sub_if_data *sdata;
110 struct sta_info __rcu *next_hop; 112 struct sta_info __rcu *next_hop;
@@ -133,12 +135,16 @@ struct mesh_path {
133 * gate's mpath may or may not be resolved and active. 135 * gate's mpath may or may not be resolved and active.
134 * @gates_lock: protects updates to known_gates 136 * @gates_lock: protects updates to known_gates
135 * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr 137 * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
 138 * @walk_head: linked list containing all mesh_path objects
139 * @walk_lock: lock protecting walk_head
136 * @entries: number of entries in the table 140 * @entries: number of entries in the table
137 */ 141 */
138struct mesh_table { 142struct mesh_table {
139 struct hlist_head known_gates; 143 struct hlist_head known_gates;
140 spinlock_t gates_lock; 144 spinlock_t gates_lock;
141 struct rhashtable rhead; 145 struct rhashtable rhead;
146 struct hlist_head walk_head;
147 spinlock_t walk_lock;
142 atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ 148 atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
143}; 149};
144 150
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a5125624a76d..88a6d5e18ccc 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
59 return NULL; 59 return NULL;
60 60
61 INIT_HLIST_HEAD(&newtbl->known_gates); 61 INIT_HLIST_HEAD(&newtbl->known_gates);
62 INIT_HLIST_HEAD(&newtbl->walk_head);
62 atomic_set(&newtbl->entries, 0); 63 atomic_set(&newtbl->entries, 0);
63 spin_lock_init(&newtbl->gates_lock); 64 spin_lock_init(&newtbl->gates_lock);
65 spin_lock_init(&newtbl->walk_lock);
64 66
65 return newtbl; 67 return newtbl;
66} 68}
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
249static struct mesh_path * 251static struct mesh_path *
250__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) 252__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
251{ 253{
252 int i = 0, ret; 254 int i = 0;
253 struct mesh_path *mpath = NULL; 255 struct mesh_path *mpath;
254 struct rhashtable_iter iter;
255
256 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
257 if (ret)
258 return NULL;
259
260 rhashtable_walk_start(&iter);
261 256
262 while ((mpath = rhashtable_walk_next(&iter))) { 257 hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
263 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
264 continue;
265 if (IS_ERR(mpath))
266 break;
267 if (i++ == idx) 258 if (i++ == idx)
268 break; 259 break;
269 } 260 }
270 rhashtable_walk_stop(&iter);
271 rhashtable_walk_exit(&iter);
272 261
273 if (IS_ERR(mpath) || !mpath) 262 if (!mpath)
274 return NULL; 263 return NULL;
275 264
276 if (mpath_expired(mpath)) { 265 if (mpath_expired(mpath)) {
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
432 return ERR_PTR(-ENOMEM); 421 return ERR_PTR(-ENOMEM);
433 422
434 tbl = sdata->u.mesh.mesh_paths; 423 tbl = sdata->u.mesh.mesh_paths;
424 spin_lock_bh(&tbl->walk_lock);
435 do { 425 do {
436 ret = rhashtable_lookup_insert_fast(&tbl->rhead, 426 ret = rhashtable_lookup_insert_fast(&tbl->rhead,
437 &new_mpath->rhash, 427 &new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
441 mpath = rhashtable_lookup_fast(&tbl->rhead, 431 mpath = rhashtable_lookup_fast(&tbl->rhead,
442 dst, 432 dst,
443 mesh_rht_params); 433 mesh_rht_params);
444 434 else if (!ret)
435 hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
445 } while (unlikely(ret == -EEXIST && !mpath)); 436 } while (unlikely(ret == -EEXIST && !mpath));
437 spin_unlock_bh(&tbl->walk_lock);
446 438
447 if (ret && ret != -EEXIST) 439 if (ret) {
448 return ERR_PTR(ret);
449
450 /* At this point either new_mpath was added, or we found a
451 * matching entry already in the table; in the latter case
452 * free the unnecessary new entry.
453 */
454 if (ret == -EEXIST) {
455 kfree(new_mpath); 440 kfree(new_mpath);
441
442 if (ret != -EEXIST)
443 return ERR_PTR(ret);
444
456 new_mpath = mpath; 445 new_mpath = mpath;
457 } 446 }
447
458 sdata->u.mesh.mesh_paths_generation++; 448 sdata->u.mesh.mesh_paths_generation++;
459 return new_mpath; 449 return new_mpath;
460} 450}
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
480 470
481 memcpy(new_mpath->mpp, mpp, ETH_ALEN); 471 memcpy(new_mpath->mpp, mpp, ETH_ALEN);
482 tbl = sdata->u.mesh.mpp_paths; 472 tbl = sdata->u.mesh.mpp_paths;
473
474 spin_lock_bh(&tbl->walk_lock);
483 ret = rhashtable_lookup_insert_fast(&tbl->rhead, 475 ret = rhashtable_lookup_insert_fast(&tbl->rhead,
484 &new_mpath->rhash, 476 &new_mpath->rhash,
485 mesh_rht_params); 477 mesh_rht_params);
478 if (!ret)
479 hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
480 spin_unlock_bh(&tbl->walk_lock);
481
482 if (ret)
483 kfree(new_mpath);
486 484
487 sdata->u.mesh.mpp_paths_generation++; 485 sdata->u.mesh.mpp_paths_generation++;
488 return ret; 486 return ret;
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
503 struct mesh_table *tbl = sdata->u.mesh.mesh_paths; 501 struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
504 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 502 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
505 struct mesh_path *mpath; 503 struct mesh_path *mpath;
506 struct rhashtable_iter iter;
507 int ret;
508
509 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
510 if (ret)
511 return;
512 504
513 rhashtable_walk_start(&iter); 505 rcu_read_lock();
514 506 hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
515 while ((mpath = rhashtable_walk_next(&iter))) {
516 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
517 continue;
518 if (IS_ERR(mpath))
519 break;
520 if (rcu_access_pointer(mpath->next_hop) == sta && 507 if (rcu_access_pointer(mpath->next_hop) == sta &&
521 mpath->flags & MESH_PATH_ACTIVE && 508 mpath->flags & MESH_PATH_ACTIVE &&
522 !(mpath->flags & MESH_PATH_FIXED)) { 509 !(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
530 WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); 517 WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
531 } 518 }
532 } 519 }
533 rhashtable_walk_stop(&iter); 520 rcu_read_unlock();
534 rhashtable_walk_exit(&iter);
535} 521}
536 522
537static void mesh_path_free_rcu(struct mesh_table *tbl, 523static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
551 537
552static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) 538static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
553{ 539{
540 hlist_del_rcu(&mpath->walk_list);
554 rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); 541 rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
555 mesh_path_free_rcu(tbl, mpath); 542 mesh_path_free_rcu(tbl, mpath);
556} 543}
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
571 struct ieee80211_sub_if_data *sdata = sta->sdata; 558 struct ieee80211_sub_if_data *sdata = sta->sdata;
572 struct mesh_table *tbl = sdata->u.mesh.mesh_paths; 559 struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
573 struct mesh_path *mpath; 560 struct mesh_path *mpath;
574 struct rhashtable_iter iter; 561 struct hlist_node *n;
575 int ret;
576
577 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
578 if (ret)
579 return;
580
581 rhashtable_walk_start(&iter);
582
583 while ((mpath = rhashtable_walk_next(&iter))) {
584 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
585 continue;
586 if (IS_ERR(mpath))
587 break;
588 562
563 spin_lock_bh(&tbl->walk_lock);
564 hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
589 if (rcu_access_pointer(mpath->next_hop) == sta) 565 if (rcu_access_pointer(mpath->next_hop) == sta)
590 __mesh_path_del(tbl, mpath); 566 __mesh_path_del(tbl, mpath);
591 } 567 }
592 568 spin_unlock_bh(&tbl->walk_lock);
593 rhashtable_walk_stop(&iter);
594 rhashtable_walk_exit(&iter);
595} 569}
596 570
597static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, 571static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
599{ 573{
600 struct mesh_table *tbl = sdata->u.mesh.mpp_paths; 574 struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
601 struct mesh_path *mpath; 575 struct mesh_path *mpath;
602 struct rhashtable_iter iter; 576 struct hlist_node *n;
603 int ret;
604
605 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
606 if (ret)
607 return;
608
609 rhashtable_walk_start(&iter);
610
611 while ((mpath = rhashtable_walk_next(&iter))) {
612 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
613 continue;
614 if (IS_ERR(mpath))
615 break;
616 577
578 spin_lock_bh(&tbl->walk_lock);
579 hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
617 if (ether_addr_equal(mpath->mpp, proxy)) 580 if (ether_addr_equal(mpath->mpp, proxy))
618 __mesh_path_del(tbl, mpath); 581 __mesh_path_del(tbl, mpath);
619 } 582 }
620 583 spin_unlock_bh(&tbl->walk_lock);
621 rhashtable_walk_stop(&iter);
622 rhashtable_walk_exit(&iter);
623} 584}
624 585
625static void table_flush_by_iface(struct mesh_table *tbl) 586static void table_flush_by_iface(struct mesh_table *tbl)
626{ 587{
627 struct mesh_path *mpath; 588 struct mesh_path *mpath;
628 struct rhashtable_iter iter; 589 struct hlist_node *n;
629 int ret;
630
631 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
632 if (ret)
633 return;
634
635 rhashtable_walk_start(&iter);
636 590
637 while ((mpath = rhashtable_walk_next(&iter))) { 591 spin_lock_bh(&tbl->walk_lock);
638 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) 592 hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
639 continue;
640 if (IS_ERR(mpath))
641 break;
642 __mesh_path_del(tbl, mpath); 593 __mesh_path_del(tbl, mpath);
643 } 594 }
644 595 spin_unlock_bh(&tbl->walk_lock);
645 rhashtable_walk_stop(&iter);
646 rhashtable_walk_exit(&iter);
647} 596}
648 597
649/** 598/**
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl,
675{ 624{
676 struct mesh_path *mpath; 625 struct mesh_path *mpath;
677 626
678 rcu_read_lock(); 627 spin_lock_bh(&tbl->walk_lock);
679 mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); 628 mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
680 if (!mpath) { 629 if (!mpath) {
681 rcu_read_unlock(); 630 spin_unlock_bh(&tbl->walk_lock);
682 return -ENXIO; 631 return -ENXIO;
683 } 632 }
684 633
685 __mesh_path_del(tbl, mpath); 634 __mesh_path_del(tbl, mpath);
686 rcu_read_unlock(); 635 spin_unlock_bh(&tbl->walk_lock);
687 return 0; 636 return 0;
688} 637}
689 638
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
854 struct mesh_table *tbl) 803 struct mesh_table *tbl)
855{ 804{
856 struct mesh_path *mpath; 805 struct mesh_path *mpath;
857 struct rhashtable_iter iter; 806 struct hlist_node *n;
858 int ret;
859 807
860 ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL); 808 spin_lock_bh(&tbl->walk_lock);
861 if (ret) 809 hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
862 return;
863
864 rhashtable_walk_start(&iter);
865
866 while ((mpath = rhashtable_walk_next(&iter))) {
867 if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
868 continue;
869 if (IS_ERR(mpath))
870 break;
871 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 810 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
872 (!(mpath->flags & MESH_PATH_FIXED)) && 811 (!(mpath->flags & MESH_PATH_FIXED)) &&
873 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) 812 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
874 __mesh_path_del(tbl, mpath); 813 __mesh_path_del(tbl, mpath);
875 } 814 }
876 815 spin_unlock_bh(&tbl->walk_lock);
877 rhashtable_walk_stop(&iter);
878 rhashtable_walk_exit(&iter);
879} 816}
880 817
881void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 818void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bb4d71efb6fb..c2a6da5d80da 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2644,6 +2644,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2644 struct ieee80211_sub_if_data *sdata = rx->sdata; 2644 struct ieee80211_sub_if_data *sdata = rx->sdata;
2645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2646 u16 ac, q, hdrlen; 2646 u16 ac, q, hdrlen;
2647 int tailroom = 0;
2647 2648
2648 hdr = (struct ieee80211_hdr *) skb->data; 2649 hdr = (struct ieee80211_hdr *) skb->data;
2649 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2650 hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2732,8 +2733,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2732 if (!ifmsh->mshcfg.dot11MeshForwarding) 2733 if (!ifmsh->mshcfg.dot11MeshForwarding)
2733 goto out; 2734 goto out;
2734 2735
2736 if (sdata->crypto_tx_tailroom_needed_cnt)
2737 tailroom = IEEE80211_ENCRYPT_TAILROOM;
2738
2735 fwd_skb = skb_copy_expand(skb, local->tx_headroom + 2739 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2736 sdata->encrypt_headroom, 0, GFP_ATOMIC); 2740 sdata->encrypt_headroom,
2741 tailroom, GFP_ATOMIC);
2737 if (!fwd_skb) 2742 if (!fwd_skb)
2738 goto out; 2743 goto out;
2739 2744
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d0eb38b890aa..ba950ae974fc 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -5,7 +5,7 @@
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright (C) 2015-2017 Intel Deutschland GmbH 7 * Copyright (C) 2015-2017 Intel Deutschland GmbH
8 * Copyright (C) 2018 Intel Corporation 8 * Copyright (C) 2018-2019 Intel Corporation
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -2146,6 +2146,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
2146 case NL80211_IFTYPE_AP_VLAN: 2146 case NL80211_IFTYPE_AP_VLAN:
2147 case NL80211_IFTYPE_MONITOR: 2147 case NL80211_IFTYPE_MONITOR:
2148 break; 2148 break;
2149 case NL80211_IFTYPE_ADHOC:
2150 if (sdata->vif.bss_conf.ibss_joined)
2151 WARN_ON(drv_join_ibss(local, sdata));
2152 /* fall through */
2149 default: 2153 default:
2150 ieee80211_reconfig_stations(sdata); 2154 ieee80211_reconfig_stations(sdata);
2151 /* fall through */ 2155 /* fall through */
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index cad48d07c818..8401cefd9f65 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -29,6 +29,7 @@ config IP_VS_IPV6
29 bool "IPv6 support for IPVS" 29 bool "IPv6 support for IPVS"
30 depends on IPV6 = y || IP_VS = IPV6 30 depends on IPV6 = y || IP_VS = IPV6
31 select IP6_NF_IPTABLES 31 select IP6_NF_IPTABLES
32 select NF_DEFRAG_IPV6
32 ---help--- 33 ---help---
33 Add IPv6 support to IPVS. 34 Add IPv6 support to IPVS.
34 35
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index fe9abf3cc10a..235205c93e14 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1536,14 +1536,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
1536 /* sorry, all this trouble for a no-hit :) */ 1536 /* sorry, all this trouble for a no-hit :) */
1537 IP_VS_DBG_PKT(12, af, pp, skb, iph->off, 1537 IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
1538 "ip_vs_in: packet continues traversal as normal"); 1538 "ip_vs_in: packet continues traversal as normal");
1539 if (iph->fragoffs) { 1539
1540 /* Fragment that couldn't be mapped to a conn entry 1540 /* Fragment couldn't be mapped to a conn entry */
1541 * is missing module nf_defrag_ipv6 1541 if (iph->fragoffs)
1542 */
1543 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1544 IP_VS_DBG_PKT(7, af, pp, skb, iph->off, 1542 IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
1545 "unhandled fragment"); 1543 "unhandled fragment");
1546 } 1544
1547 *verdict = NF_ACCEPT; 1545 *verdict = NF_ACCEPT;
1548 return 0; 1546 return 0;
1549 } 1547 }
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 7d6318664eb2..ac8d848d7624 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -43,6 +43,7 @@
43#ifdef CONFIG_IP_VS_IPV6 43#ifdef CONFIG_IP_VS_IPV6
44#include <net/ipv6.h> 44#include <net/ipv6.h>
45#include <net/ip6_route.h> 45#include <net/ip6_route.h>
46#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
46#endif 47#endif
47#include <net/route.h> 48#include <net/route.h>
48#include <net/sock.h> 49#include <net/sock.h>
@@ -900,11 +901,17 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
900 901
901#ifdef CONFIG_IP_VS_IPV6 902#ifdef CONFIG_IP_VS_IPV6
902 if (udest->af == AF_INET6) { 903 if (udest->af == AF_INET6) {
904 int ret;
905
903 atype = ipv6_addr_type(&udest->addr.in6); 906 atype = ipv6_addr_type(&udest->addr.in6);
904 if ((!(atype & IPV6_ADDR_UNICAST) || 907 if ((!(atype & IPV6_ADDR_UNICAST) ||
905 atype & IPV6_ADDR_LINKLOCAL) && 908 atype & IPV6_ADDR_LINKLOCAL) &&
906 !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) 909 !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6))
907 return -EINVAL; 910 return -EINVAL;
911
912 ret = nf_defrag_ipv6_enable(svc->ipvs->net);
913 if (ret)
914 return ret;
908 } else 915 } else
909#endif 916#endif
910 { 917 {
@@ -1228,6 +1235,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
1228 ret = -EINVAL; 1235 ret = -EINVAL;
1229 goto out_err; 1236 goto out_err;
1230 } 1237 }
1238
1239 ret = nf_defrag_ipv6_enable(ipvs->net);
1240 if (ret)
1241 goto out_err;
1231 } 1242 }
1232#endif 1243#endif
1233 1244
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5a92f23f179f..4893f248dfdc 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -313,6 +313,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
313 int err; 313 int err;
314 314
315 list_for_each_entry(rule, &ctx->chain->rules, list) { 315 list_for_each_entry(rule, &ctx->chain->rules, list) {
316 if (!nft_is_active_next(ctx->net, rule))
317 continue;
318
316 err = nft_delrule(ctx, rule); 319 err = nft_delrule(ctx, rule);
317 if (err < 0) 320 if (err < 0)
318 return err; 321 return err;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index fe64df848365..0a4bad55a8aa 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -315,6 +315,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
315{ 315{
316 struct xt_target *target = expr->ops->data; 316 struct xt_target *target = expr->ops->data;
317 void *info = nft_expr_priv(expr); 317 void *info = nft_expr_priv(expr);
318 struct module *me = target->me;
318 struct xt_tgdtor_param par; 319 struct xt_tgdtor_param par;
319 320
320 par.net = ctx->net; 321 par.net = ctx->net;
@@ -325,7 +326,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
325 par.target->destroy(&par); 326 par.target->destroy(&par);
326 327
327 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) 328 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
328 module_put(target->me); 329 module_put(me);
329} 330}
330 331
331static int nft_extension_dump_info(struct sk_buff *skb, int attr, 332static int nft_extension_dump_info(struct sk_buff *skb, int attr,
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index aecadd471e1d..13e1ac333fa4 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1899,7 +1899,7 @@ static int __init xt_init(void)
1899 seqcount_init(&per_cpu(xt_recseq, i)); 1899 seqcount_init(&per_cpu(xt_recseq, i));
1900 } 1900 }
1901 1901
1902 xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); 1902 xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1903 if (!xt) 1903 if (!xt)
1904 return -ENOMEM; 1904 return -ENOMEM;
1905 1905
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3b1a78906bc0..1cd1d83a4be0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4292,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4292 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4292 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4293 if (unlikely(rb->frames_per_block == 0)) 4293 if (unlikely(rb->frames_per_block == 0))
4294 goto out; 4294 goto out;
4295 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) 4295 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4296 goto out; 4296 goto out;
4297 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4297 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4298 req->tp_frame_nr)) 4298 req->tp_frame_nr))
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9fc76b19cd3c..db3473540303 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
132 ph->utid = 0; 132 ph->utid = 0;
133 ph->message_id = id; 133 ph->message_id = id;
134 ph->pipe_handle = pn->pipe_handle; 134 ph->pipe_handle = pn->pipe_handle;
135 ph->data[0] = code; 135 ph->error_code = code;
136 return pn_skb_send(sk, skb, NULL); 136 return pn_skb_send(sk, skb, NULL);
137} 137}
138 138
@@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
153 ph->utid = id; /* whatever */ 153 ph->utid = id; /* whatever */
154 ph->message_id = id; 154 ph->message_id = id;
155 ph->pipe_handle = pn->pipe_handle; 155 ph->pipe_handle = pn->pipe_handle;
156 ph->data[0] = code; 156 ph->error_code = code;
157 return pn_skb_send(sk, skb, NULL); 157 return pn_skb_send(sk, skb, NULL);
158} 158}
159 159
@@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
208 struct pnpipehdr *ph; 208 struct pnpipehdr *ph;
209 struct sockaddr_pn dst; 209 struct sockaddr_pn dst;
210 u8 data[4] = { 210 u8 data[4] = {
211 oph->data[0], /* PEP type */ 211 oph->pep_type, /* PEP type */
212 code, /* error code, at an unusual offset */ 212 code, /* error code, at an unusual offset */
213 PAD, PAD, 213 PAD, PAD,
214 }; 214 };
@@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
221 ph->utid = oph->utid; 221 ph->utid = oph->utid;
222 ph->message_id = PNS_PEP_CTRL_RESP; 222 ph->message_id = PNS_PEP_CTRL_RESP;
223 ph->pipe_handle = oph->pipe_handle; 223 ph->pipe_handle = oph->pipe_handle;
224 ph->data[0] = oph->data[1]; /* CTRL id */ 224 ph->data0 = oph->data[0]; /* CTRL id */
225 225
226 pn_skb_get_src_sockaddr(oskb, &dst); 226 pn_skb_get_src_sockaddr(oskb, &dst);
227 return pn_skb_send(sk, skb, &dst); 227 return pn_skb_send(sk, skb, &dst);
@@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
272 return -EINVAL; 272 return -EINVAL;
273 273
274 hdr = pnp_hdr(skb); 274 hdr = pnp_hdr(skb);
275 if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 275 if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
276 net_dbg_ratelimited("Phonet unknown PEP type: %u\n", 276 net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
277 (unsigned int)hdr->data[0]); 277 (unsigned int)hdr->pep_type);
278 return -EOPNOTSUPP; 278 return -EOPNOTSUPP;
279 } 279 }
280 280
281 switch (hdr->data[1]) { 281 switch (hdr->data[0]) {
282 case PN_PEP_IND_FLOW_CONTROL: 282 case PN_PEP_IND_FLOW_CONTROL:
283 switch (pn->tx_fc) { 283 switch (pn->tx_fc) {
284 case PN_LEGACY_FLOW_CONTROL: 284 case PN_LEGACY_FLOW_CONTROL:
285 switch (hdr->data[4]) { 285 switch (hdr->data[3]) {
286 case PEP_IND_BUSY: 286 case PEP_IND_BUSY:
287 atomic_set(&pn->tx_credits, 0); 287 atomic_set(&pn->tx_credits, 0);
288 break; 288 break;
@@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
292 } 292 }
293 break; 293 break;
294 case PN_ONE_CREDIT_FLOW_CONTROL: 294 case PN_ONE_CREDIT_FLOW_CONTROL:
295 if (hdr->data[4] == PEP_IND_READY) 295 if (hdr->data[3] == PEP_IND_READY)
296 atomic_set(&pn->tx_credits, wake = 1); 296 atomic_set(&pn->tx_credits, wake = 1);
297 break; 297 break;
298 } 298 }
@@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
301 case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: 301 case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
302 if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) 302 if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
303 break; 303 break;
304 atomic_add(wake = hdr->data[4], &pn->tx_credits); 304 atomic_add(wake = hdr->data[3], &pn->tx_credits);
305 break; 305 break;
306 306
307 default: 307 default:
308 net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", 308 net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
309 (unsigned int)hdr->data[1]); 309 (unsigned int)hdr->data[0]);
310 return -EOPNOTSUPP; 310 return -EOPNOTSUPP;
311 } 311 }
312 if (wake) 312 if (wake)
@@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
318{ 318{
319 struct pep_sock *pn = pep_sk(sk); 319 struct pep_sock *pn = pep_sk(sk);
320 struct pnpipehdr *hdr = pnp_hdr(skb); 320 struct pnpipehdr *hdr = pnp_hdr(skb);
321 u8 n_sb = hdr->data[0]; 321 u8 n_sb = hdr->data0;
322 322
323 pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; 323 pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
324 __skb_pull(skb, sizeof(*hdr)); 324 __skb_pull(skb, sizeof(*hdr));
@@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
506 return -ECONNREFUSED; 506 return -ECONNREFUSED;
507 507
508 /* Parse sub-blocks */ 508 /* Parse sub-blocks */
509 n_sb = hdr->data[4]; 509 n_sb = hdr->data[3];
510 while (n_sb > 0) { 510 while (n_sb > 0) {
511 u8 type, buf[6], len = sizeof(buf); 511 u8 type, buf[6], len = sizeof(buf);
512 const u8 *data = pep_get_sb(skb, &type, &len, buf); 512 const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk)
739 ph->utid = 0; 739 ph->utid = 0;
740 ph->message_id = PNS_PIPE_REMOVE_REQ; 740 ph->message_id = PNS_PIPE_REMOVE_REQ;
741 ph->pipe_handle = pn->pipe_handle; 741 ph->pipe_handle = pn->pipe_handle;
742 ph->data[0] = PAD; 742 ph->data0 = PAD;
743 return pn_skb_send(sk, skb, NULL); 743 return pn_skb_send(sk, skb, NULL);
744} 744}
745 745
@@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
817 peer_type = hdr->other_pep_type << 8; 817 peer_type = hdr->other_pep_type << 8;
818 818
819 /* Parse sub-blocks (options) */ 819 /* Parse sub-blocks (options) */
820 n_sb = hdr->data[4]; 820 n_sb = hdr->data[3];
821 while (n_sb > 0) { 821 while (n_sb > 0) {
822 u8 type, buf[1], len = sizeof(buf); 822 u8 type, buf[1], len = sizeof(buf);
823 const u8 *data = pep_get_sb(skb, &type, &len, buf); 823 const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
1109 ph->utid = 0; 1109 ph->utid = 0;
1110 if (pn->aligned) { 1110 if (pn->aligned) {
1111 ph->message_id = PNS_PIPE_ALIGNED_DATA; 1111 ph->message_id = PNS_PIPE_ALIGNED_DATA;
1112 ph->data[0] = 0; /* padding */ 1112 ph->data0 = 0; /* padding */
1113 } else 1113 } else
1114 ph->message_id = PNS_PIPE_DATA; 1114 ph->message_id = PNS_PIPE_DATA;
1115 ph->pipe_handle = pn->pipe_handle; 1115 ph->pipe_handle = pn->pipe_handle;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 9ccc93f257db..38bb882bb958 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -48,7 +48,7 @@ struct tcindex_data {
48 u32 hash; /* hash table size; 0 if undefined */ 48 u32 hash; /* hash table size; 0 if undefined */
49 u32 alloc_hash; /* allocated size */ 49 u32 alloc_hash; /* allocated size */
50 u32 fall_through; /* 0: only classify if explicit match */ 50 u32 fall_through; /* 0: only classify if explicit match */
51 struct rcu_head rcu; 51 struct rcu_work rwork;
52}; 52};
53 53
54static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) 54static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
@@ -221,17 +221,11 @@ found:
221 return 0; 221 return 0;
222} 222}
223 223
224static int tcindex_destroy_element(struct tcf_proto *tp, 224static void tcindex_destroy_work(struct work_struct *work)
225 void *arg, struct tcf_walker *walker)
226{
227 bool last;
228
229 return tcindex_delete(tp, arg, &last, NULL);
230}
231
232static void __tcindex_destroy(struct rcu_head *head)
233{ 225{
234 struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); 226 struct tcindex_data *p = container_of(to_rcu_work(work),
227 struct tcindex_data,
228 rwork);
235 229
236 kfree(p->perfect); 230 kfree(p->perfect);
237 kfree(p->h); 231 kfree(p->h);
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
258 return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); 252 return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
259} 253}
260 254
261static void __tcindex_partial_destroy(struct rcu_head *head) 255static void tcindex_partial_destroy_work(struct work_struct *work)
262{ 256{
263 struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); 257 struct tcindex_data *p = container_of(to_rcu_work(work),
258 struct tcindex_data,
259 rwork);
264 260
265 kfree(p->perfect); 261 kfree(p->perfect);
266 kfree(p); 262 kfree(p);
@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
275 kfree(cp->perfect); 271 kfree(cp->perfect);
276} 272}
277 273
278static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) 274static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
279{ 275{
280 int i, err = 0; 276 int i, err = 0;
281 277
@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
289 TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); 285 TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
290 if (err < 0) 286 if (err < 0)
291 goto errout; 287 goto errout;
288#ifdef CONFIG_NET_CLS_ACT
289 cp->perfect[i].exts.net = net;
290#endif
292 } 291 }
293 292
294 return 0; 293 return 0;
@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
305 struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) 304 struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
306{ 305{
307 struct tcindex_filter_result new_filter_result, *old_r = r; 306 struct tcindex_filter_result new_filter_result, *old_r = r;
308 struct tcindex_filter_result cr;
309 struct tcindex_data *cp = NULL, *oldp; 307 struct tcindex_data *cp = NULL, *oldp;
310 struct tcindex_filter *f = NULL; /* make gcc behave */ 308 struct tcindex_filter *f = NULL; /* make gcc behave */
309 struct tcf_result cr = {};
311 int err, balloc = 0; 310 int err, balloc = 0;
312 struct tcf_exts e; 311 struct tcf_exts e;
313 312
@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
337 if (p->perfect) { 336 if (p->perfect) {
338 int i; 337 int i;
339 338
340 if (tcindex_alloc_perfect_hash(cp) < 0) 339 if (tcindex_alloc_perfect_hash(net, cp) < 0)
341 goto errout; 340 goto errout;
342 for (i = 0; i < cp->hash; i++) 341 for (i = 0; i < cp->hash; i++)
343 cp->perfect[i].res = p->perfect[i].res; 342 cp->perfect[i].res = p->perfect[i].res;
@@ -348,11 +347,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
348 err = tcindex_filter_result_init(&new_filter_result); 347 err = tcindex_filter_result_init(&new_filter_result);
349 if (err < 0) 348 if (err < 0)
350 goto errout1; 349 goto errout1;
351 err = tcindex_filter_result_init(&cr);
352 if (err < 0)
353 goto errout1;
354 if (old_r) 350 if (old_r)
355 cr.res = r->res; 351 cr = r->res;
356 352
357 if (tb[TCA_TCINDEX_HASH]) 353 if (tb[TCA_TCINDEX_HASH])
358 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); 354 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
406 err = -ENOMEM; 402 err = -ENOMEM;
407 if (!cp->perfect && !cp->h) { 403 if (!cp->perfect && !cp->h) {
408 if (valid_perfect_hash(cp)) { 404 if (valid_perfect_hash(cp)) {
409 if (tcindex_alloc_perfect_hash(cp) < 0) 405 if (tcindex_alloc_perfect_hash(net, cp) < 0)
410 goto errout_alloc; 406 goto errout_alloc;
411 balloc = 1; 407 balloc = 1;
412 } else { 408 } else {
@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
443 } 439 }
444 440
445 if (tb[TCA_TCINDEX_CLASSID]) { 441 if (tb[TCA_TCINDEX_CLASSID]) {
446 cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); 442 cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
447 tcf_bind_filter(tp, &cr.res, base); 443 tcf_bind_filter(tp, &cr, base);
448 } 444 }
449 445
450 if (old_r && old_r != r) { 446 if (old_r && old_r != r) {
@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
456 } 452 }
457 453
458 oldp = p; 454 oldp = p;
459 r->res = cr.res; 455 r->res = cr;
460 tcf_exts_change(&r->exts, &e); 456 tcf_exts_change(&r->exts, &e);
461 457
462 rcu_assign_pointer(tp->root, cp); 458 rcu_assign_pointer(tp->root, cp);
@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
475 ; /* nothing */ 471 ; /* nothing */
476 472
477 rcu_assign_pointer(*fp, f); 473 rcu_assign_pointer(*fp, f);
474 } else {
475 tcf_exts_destroy(&new_filter_result.exts);
478 } 476 }
479 477
480 if (oldp) 478 if (oldp)
481 call_rcu(&oldp->rcu, __tcindex_partial_destroy); 479 tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
482 return 0; 480 return 0;
483 481
484errout_alloc: 482errout_alloc:
@@ -487,7 +485,6 @@ errout_alloc:
487 else if (balloc == 2) 485 else if (balloc == 2)
488 kfree(cp->h); 486 kfree(cp->h);
489errout1: 487errout1:
490 tcf_exts_destroy(&cr.exts);
491 tcf_exts_destroy(&new_filter_result.exts); 488 tcf_exts_destroy(&new_filter_result.exts);
492errout: 489errout:
493 kfree(cp); 490 kfree(cp);
@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
562 struct netlink_ext_ack *extack) 559 struct netlink_ext_ack *extack)
563{ 560{
564 struct tcindex_data *p = rtnl_dereference(tp->root); 561 struct tcindex_data *p = rtnl_dereference(tp->root);
565 struct tcf_walker walker; 562 int i;
566 563
567 pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); 564 pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
568 walker.count = 0;
569 walker.skip = 0;
570 walker.fn = tcindex_destroy_element;
571 tcindex_walk(tp, &walker);
572 565
573 call_rcu(&p->rcu, __tcindex_destroy); 566 if (p->perfect) {
567 for (i = 0; i < p->hash; i++) {
568 struct tcindex_filter_result *r = p->perfect + i;
569
570 tcf_unbind_filter(tp, &r->res);
571 if (tcf_exts_get_net(&r->exts))
572 tcf_queue_work(&r->rwork,
573 tcindex_destroy_rexts_work);
574 else
575 __tcindex_destroy_rexts(r);
576 }
577 }
578
579 for (i = 0; p->h && i < p->hash; i++) {
580 struct tcindex_filter *f, *next;
581 bool last;
582
583 for (f = rtnl_dereference(p->h[i]); f; f = next) {
584 next = rtnl_dereference(f->next);
585 tcindex_delete(tp, &f->result, &last, NULL);
586 }
587 }
588
589 tcf_queue_work(&p->rwork, tcindex_destroy_work);
574} 590}
575 591
576 592
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 66ba2ce2320f..968a85fe4d4a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -500,7 +500,7 @@ static void dev_watchdog_down(struct net_device *dev)
500 * netif_carrier_on - set carrier 500 * netif_carrier_on - set carrier
501 * @dev: network device 501 * @dev: network device
502 * 502 *
503 * Device has detected that carrier. 503 * Device has detected acquisition of carrier.
504 */ 504 */
505void netif_carrier_on(struct net_device *dev) 505void netif_carrier_on(struct net_device *dev)
506{ 506{
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 078f01a8d582..435847d98b51 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
256 + nla_total_size(1) /* INET_DIAG_TOS */ 256 + nla_total_size(1) /* INET_DIAG_TOS */
257 + nla_total_size(1) /* INET_DIAG_TCLASS */ 257 + nla_total_size(1) /* INET_DIAG_TCLASS */
258 + nla_total_size(4) /* INET_DIAG_MARK */ 258 + nla_total_size(4) /* INET_DIAG_MARK */
259 + nla_total_size(4) /* INET_DIAG_CLASS_ID */
259 + nla_total_size(addrlen * asoc->peer.transport_count) 260 + nla_total_size(addrlen * asoc->peer.transport_count)
260 + nla_total_size(addrlen * addrcnt) 261 + nla_total_size(addrlen * addrcnt)
261 + nla_total_size(sizeof(struct inet_diag_meminfo)) 262 + nla_total_size(sizeof(struct inet_diag_meminfo))
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 123e9f2dc226..edfcf16e704c 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
36{ 36{
37 skb->ip_summed = CHECKSUM_NONE; 37 skb->ip_summed = CHECKSUM_NONE;
38 skb->csum_not_inet = 0; 38 skb->csum_not_inet = 0;
39 gso_reset_checksum(skb, ~0);
39 return sctp_compute_cksum(skb, skb_transport_offset(skb)); 40 return sctp_compute_cksum(skb, skb_transport_offset(skb));
40} 41}
41 42
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index f24633114dfd..2936ed17bf9e 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -144,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
144 } 144 }
145 } 145 }
146 146
147 for (i = outcnt; i < stream->outcnt; i++) 147 for (i = outcnt; i < stream->outcnt; i++) {
148 kfree(SCTP_SO(stream, i)->ext); 148 kfree(SCTP_SO(stream, i)->ext);
149 SCTP_SO(stream, i)->ext = NULL;
150 }
149} 151}
150 152
151static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, 153static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 033696e6f74f..ad158d311ffa 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
207 207
208 /* When a data chunk is sent, reset the heartbeat interval. */ 208 /* When a data chunk is sent, reset the heartbeat interval. */
209 expires = jiffies + sctp_transport_timeout(transport); 209 expires = jiffies + sctp_transport_timeout(transport);
210 if (time_before(transport->hb_timer.expires, expires) && 210 if ((time_before(transport->hb_timer.expires, expires) ||
211 !timer_pending(&transport->hb_timer)) &&
211 !mod_timer(&transport->hb_timer, 212 !mod_timer(&transport->hb_timer,
212 expires + prandom_u32_max(transport->rto))) 213 expires + prandom_u32_max(transport->rto)))
213 sctp_transport_hold(transport); 214 sctp_transport_hold(transport);
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5721416d0605..adbdf195eb08 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */
113} __aligned(8); 113} __aligned(8);
114 114
115enum smc_urg_state { 115enum smc_urg_state {
116 SMC_URG_VALID, /* data present */ 116 SMC_URG_VALID = 1, /* data present */
117 SMC_URG_NOTYET, /* data pending */ 117 SMC_URG_NOTYET = 2, /* data pending */
118 SMC_URG_READ /* data was already read */ 118 SMC_URG_READ = 3, /* data was already read */
119}; 119};
120 120
121struct smc_connection { 121struct smc_connection {
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index a712c9f8699b..fb07ad8d69a6 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -101,9 +101,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
101 101
102 conn->tx_cdc_seq++; 102 conn->tx_cdc_seq++;
103 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; 103 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
104 smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, 104 smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
105 &conn->local_tx_ctrl, conn);
106 smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn);
107 rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); 105 rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
108 if (!rc) 106 if (!rc)
109 smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); 107 smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 271e2524dc8f..f1cdde9d4b89 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -211,26 +211,27 @@ static inline int smc_curs_diff_large(unsigned int size,
211 211
212static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, 212static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
213 union smc_host_cursor *local, 213 union smc_host_cursor *local,
214 union smc_host_cursor *save,
214 struct smc_connection *conn) 215 struct smc_connection *conn)
215{ 216{
216 union smc_host_cursor temp; 217 smc_curs_copy(save, local, conn);
217 218 peer->count = htonl(save->count);
218 smc_curs_copy(&temp, local, conn); 219 peer->wrap = htons(save->wrap);
219 peer->count = htonl(temp.count);
220 peer->wrap = htons(temp.wrap);
221 /* peer->reserved = htons(0); must be ensured by caller */ 220 /* peer->reserved = htons(0); must be ensured by caller */
222} 221}
223 222
224static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, 223static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
225 struct smc_host_cdc_msg *local, 224 struct smc_connection *conn,
226 struct smc_connection *conn) 225 union smc_host_cursor *save)
227{ 226{
227 struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;
228
228 peer->common.type = local->common.type; 229 peer->common.type = local->common.type;
229 peer->len = local->len; 230 peer->len = local->len;
230 peer->seqno = htons(local->seqno); 231 peer->seqno = htons(local->seqno);
231 peer->token = htonl(local->token); 232 peer->token = htonl(local->token);
232 smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn); 233 smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn);
233 smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn); 234 smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn);
234 peer->prod_flags = local->prod_flags; 235 peer->prod_flags = local->prod_flags;
235 peer->conn_state_flags = local->conn_state_flags; 236 peer->conn_state_flags = local->conn_state_flags;
236} 237}
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index fb6656295204..507105127095 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
44 unsigned char *cksum, unsigned char *buf) 44 unsigned char *cksum, unsigned char *buf)
45{ 45{
46 struct crypto_sync_skcipher *cipher; 46 struct crypto_sync_skcipher *cipher;
47 unsigned char plain[8]; 47 unsigned char *plain;
48 s32 code; 48 s32 code;
49 49
50 dprintk("RPC: %s:\n", __func__); 50 dprintk("RPC: %s:\n", __func__);
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
52 if (IS_ERR(cipher)) 52 if (IS_ERR(cipher))
53 return PTR_ERR(cipher); 53 return PTR_ERR(cipher);
54 54
55 plain = kmalloc(8, GFP_NOFS);
56 if (!plain)
57 return -ENOMEM;
58
55 plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); 59 plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
56 plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); 60 plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
57 plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); 61 plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
67 71
68 code = krb5_encrypt(cipher, cksum, plain, buf, 8); 72 code = krb5_encrypt(cipher, cksum, plain, buf, 8);
69out: 73out:
74 kfree(plain);
70 crypto_free_sync_skcipher(cipher); 75 crypto_free_sync_skcipher(cipher);
71 return code; 76 return code;
72} 77}
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
77 u32 seqnum, 82 u32 seqnum,
78 unsigned char *cksum, unsigned char *buf) 83 unsigned char *cksum, unsigned char *buf)
79{ 84{
80 unsigned char plain[8]; 85 unsigned char *plain;
86 s32 code;
81 87
82 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) 88 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
83 return krb5_make_rc4_seq_num(kctx, direction, seqnum, 89 return krb5_make_rc4_seq_num(kctx, direction, seqnum,
84 cksum, buf); 90 cksum, buf);
85 91
92 plain = kmalloc(8, GFP_NOFS);
93 if (!plain)
94 return -ENOMEM;
95
86 plain[0] = (unsigned char) (seqnum & 0xff); 96 plain[0] = (unsigned char) (seqnum & 0xff);
87 plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); 97 plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
88 plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); 98 plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
93 plain[6] = direction; 103 plain[6] = direction;
94 plain[7] = direction; 104 plain[7] = direction;
95 105
96 return krb5_encrypt(key, cksum, plain, buf, 8); 106 code = krb5_encrypt(key, cksum, plain, buf, 8);
107 kfree(plain);
108 return code;
97} 109}
98 110
99static s32 111static s32
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
101 unsigned char *buf, int *direction, s32 *seqnum) 113 unsigned char *buf, int *direction, s32 *seqnum)
102{ 114{
103 struct crypto_sync_skcipher *cipher; 115 struct crypto_sync_skcipher *cipher;
104 unsigned char plain[8]; 116 unsigned char *plain;
105 s32 code; 117 s32 code;
106 118
107 dprintk("RPC: %s:\n", __func__); 119 dprintk("RPC: %s:\n", __func__);
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
113 if (code) 125 if (code)
114 goto out; 126 goto out;
115 127
128 plain = kmalloc(8, GFP_NOFS);
129 if (!plain) {
130 code = -ENOMEM;
131 goto out;
132 }
133
116 code = krb5_decrypt(cipher, cksum, buf, plain, 8); 134 code = krb5_decrypt(cipher, cksum, buf, plain, 8);
117 if (code) 135 if (code)
118 goto out; 136 goto out_plain;
119 137
120 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) 138 if ((plain[4] != plain[5]) || (plain[4] != plain[6])
121 || (plain[4] != plain[7])) { 139 || (plain[4] != plain[7])) {
122 code = (s32)KG_BAD_SEQ; 140 code = (s32)KG_BAD_SEQ;
123 goto out; 141 goto out_plain;
124 } 142 }
125 143
126 *direction = plain[4]; 144 *direction = plain[4];
127 145
128 *seqnum = ((plain[0] << 24) | (plain[1] << 16) | 146 *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
129 (plain[2] << 8) | (plain[3])); 147 (plain[2] << 8) | (plain[3]));
148out_plain:
149 kfree(plain);
130out: 150out:
131 crypto_free_sync_skcipher(cipher); 151 crypto_free_sync_skcipher(cipher);
132 return code; 152 return code;
@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
139 int *direction, u32 *seqnum) 159 int *direction, u32 *seqnum)
140{ 160{
141 s32 code; 161 s32 code;
142 unsigned char plain[8]; 162 unsigned char *plain;
143 struct crypto_sync_skcipher *key = kctx->seq; 163 struct crypto_sync_skcipher *key = kctx->seq;
144 164
145 dprintk("RPC: krb5_get_seq_num:\n"); 165 dprintk("RPC: krb5_get_seq_num:\n");
@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
147 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) 167 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
148 return krb5_get_rc4_seq_num(kctx, cksum, buf, 168 return krb5_get_rc4_seq_num(kctx, cksum, buf,
149 direction, seqnum); 169 direction, seqnum);
170 plain = kmalloc(8, GFP_NOFS);
171 if (!plain)
172 return -ENOMEM;
150 173
151 if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) 174 if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
152 return code; 175 goto out;
153 176
154 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || 177 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
155 (plain[4] != plain[7])) 178 (plain[4] != plain[7])) {
156 return (s32)KG_BAD_SEQ; 179 code = (s32)KG_BAD_SEQ;
180 goto out;
181 }
157 182
158 *direction = plain[4]; 183 *direction = plain[4];
159 184
160 *seqnum = ((plain[0]) | 185 *seqnum = ((plain[0]) |
161 (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); 186 (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
162 187
163 return 0; 188out:
189 kfree(plain);
190 return code;
164} 191}
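
The four gss_krb5_seqnum.c hunks above share one pattern: the 8-byte scratch buffer moves from the stack to kmalloc(), because krb5_encrypt()/krb5_decrypt() hand it to the crypto layer, which cannot safely build scatterlists over stack memory once CONFIG_VMAP_STACK makes the stack vmalloc-backed. A minimal sketch of that pattern, with do_crypt() standing in for the real krb5 helpers:

/* sketch only: heap-allocate a small scratch buffer before crypto */
static int crypt_seq(const u8 *in, u8 *out)
{
	u8 *plain;
	int err;

	plain = kmalloc(8, GFP_NOFS);	/* GFP_NOFS: may run under fs writeback */
	if (!plain)
		return -ENOMEM;

	memcpy(plain, in, 8);
	err = do_crypt(plain, out, 8);	/* scatterlists over 'plain' are now fine */

	kfree(plain);
	return err;
}
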
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
index 45a033329cd4..19bb356230ed 100644
--- a/net/sunrpc/debugfs.c
+++ b/net/sunrpc/debugfs.c
@@ -146,7 +146,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
146 rcu_read_lock(); 146 rcu_read_lock();
147 xprt = rcu_dereference(clnt->cl_xprt); 147 xprt = rcu_dereference(clnt->cl_xprt);
148 /* no "debugfs" dentry? Don't bother with the symlink. */ 148 /* no "debugfs" dentry? Don't bother with the symlink. */
149 if (!xprt->debugfs) { 149 if (IS_ERR_OR_NULL(xprt->debugfs)) {
150 rcu_read_unlock(); 150 rcu_read_unlock();
151 return; 151 return;
152 } 152 }
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 4994e75945b8..21113bfd4eca 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -527,7 +527,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
527 527
528 sendcq = ib_alloc_cq(ia->ri_device, NULL, 528 sendcq = ib_alloc_cq(ia->ri_device, NULL,
529 ep->rep_attr.cap.max_send_wr + 1, 529 ep->rep_attr.cap.max_send_wr + 1,
530 1, IB_POLL_WORKQUEUE); 530 ia->ri_device->num_comp_vectors > 1 ? 1 : 0,
531 IB_POLL_WORKQUEUE);
531 if (IS_ERR(sendcq)) { 532 if (IS_ERR(sendcq)) {
532 rc = PTR_ERR(sendcq); 533 rc = PTR_ERR(sendcq);
533 goto out1; 534 goto out1;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 2792a3cae682..85ad5c0678d0 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1145,7 +1145,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1145 default: 1145 default:
1146 pr_warn("Dropping received illegal msg type\n"); 1146 pr_warn("Dropping received illegal msg type\n");
1147 kfree_skb(skb); 1147 kfree_skb(skb);
1148 return false; 1148 return true;
1149 }; 1149 };
1150} 1150}
1151 1151
@@ -1425,6 +1425,10 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1425 l->rcv_unacked = 0; 1425 l->rcv_unacked = 0;
1426 } else { 1426 } else {
1427 /* RESET_MSG or ACTIVATE_MSG */ 1427 /* RESET_MSG or ACTIVATE_MSG */
1428 if (mtyp == ACTIVATE_MSG) {
1429 msg_set_dest_session_valid(hdr, 1);
1430 msg_set_dest_session(hdr, l->peer_session);
1431 }
1428 msg_set_max_pkt(hdr, l->advertised_mtu); 1432 msg_set_max_pkt(hdr, l->advertised_mtu);
1429 strcpy(data, l->if_name); 1433 strcpy(data, l->if_name);
1430 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); 1434 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
@@ -1642,6 +1646,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1642 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1646 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1643 break; 1647 break;
1644 } 1648 }
1649
1650 /* If this endpoint was re-created while peer was ESTABLISHING
1651 * it doesn't know current session number. Force re-synch.
1652 */
1653 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
1654 l->session != msg_dest_session(hdr)) {
1655 if (less(l->session, msg_dest_session(hdr)))
1656 l->session = msg_dest_session(hdr) + 1;
1657 break;
1658 }
1659
1645 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ 1660 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1646 if (mtyp == RESET_MSG || !link_is_up(l)) 1661 if (mtyp == RESET_MSG || !link_is_up(l))
1647 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); 1662 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
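
The ACTIVATE_MSG hunk above only bumps the local session number when it lags the peer's recorded value, using TIPC's wrap-safe 16-bit ordering helper. A hedged sketch of that style of comparison (u16_before() here is illustrative, roughly what TIPC's less() does for 16-bit sequence and session numbers):

/* "a comes before b" in modulo-2^16 arithmetic */
static inline bool u16_before(u16 a, u16 b)
{
	return (s16)(b - a) > 0;
}

	/* usage, with peer_sess taken from msg_dest_session(hdr): */
	if (u16_before(l->session, peer_sess))
		l->session = peer_sess + 1;	/* adopt a strictly newer session */
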
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a0924956bb61..d7e4b8b93f9d 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -360,6 +360,28 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
360 msg_set_bits(m, 1, 0, 0xffff, n); 360 msg_set_bits(m, 1, 0, 0xffff, n);
361} 361}
362 362
363/* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch
364 * link peer session number
365 */
366static inline bool msg_dest_session_valid(struct tipc_msg *m)
367{
368 return msg_bits(m, 1, 16, 0x1);
369}
370
371static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid)
372{
373 msg_set_bits(m, 1, 16, 0x1, valid);
374}
375
376static inline u16 msg_dest_session(struct tipc_msg *m)
377{
378 return msg_bits(m, 1, 0, 0xffff);
379}
380
381static inline void msg_set_dest_session(struct tipc_msg *m, u16 n)
382{
383 msg_set_bits(m, 1, 0, 0xffff, n);
384}
363 385
364/* 386/*
365 * Word 2 387 * Word 2
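
The accessors added above carve word 1 of an ACTIVATE_MSG header into a one-bit validity flag (bit 16) and a 16-bit destination session (bits 0-15), reusing bits that ACTIVATE_MSG does not otherwise need, per the comment in the hunk. A short usage sketch, assuming hdr is the header being built on the sending side and parsed on the receiving side:

	/* sender: stamp the session we currently hold for the peer */
	msg_set_dest_session_valid(hdr, 1);
	msg_set_dest_session(hdr, l->peer_session);

	/* receiver: only trust the field when the flag is set */
	if (msg_dest_session_valid(hdr) &&
	    msg_dest_session(hdr) != l->session) {
		/* peer's view of our session is stale - re-synch as in link.c */
	}
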
diff --git a/net/tipc/node.c b/net/tipc/node.c
index db2a6c3e0be9..2dc4919ab23c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -830,15 +830,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
830 tipc_node_write_lock(n); 830 tipc_node_write_lock(n);
831 if (!tipc_link_is_establishing(l)) { 831 if (!tipc_link_is_establishing(l)) {
832 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); 832 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
833 if (delete) {
834 kfree(l);
835 le->link = NULL;
836 n->link_cnt--;
837 }
838 } else { 833 } else {
839 /* Defuse pending tipc_node_link_up() */ 834 /* Defuse pending tipc_node_link_up() */
835 tipc_link_reset(l);
840 tipc_link_fsm_evt(l, LINK_RESET_EVT); 836 tipc_link_fsm_evt(l, LINK_RESET_EVT);
841 } 837 }
838 if (delete) {
839 kfree(l);
840 le->link = NULL;
841 n->link_cnt--;
842 }
842 trace_tipc_node_link_down(n, true, "node link down or deleted!"); 843 trace_tipc_node_link_down(n, true, "node link down or deleted!");
843 tipc_node_write_unlock(n); 844 tipc_node_write_unlock(n);
844 if (delete) 845 if (delete)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1217c90a363b..684f2125fc6b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -388,7 +388,7 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
388 rc_ = tipc_sk_sock_err((sock_), timeo_); \ 388 rc_ = tipc_sk_sock_err((sock_), timeo_); \
389 if (rc_) \ 389 if (rc_) \
390 break; \ 390 break; \
391 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ 391 add_wait_queue(sk_sleep(sk_), &wait_); \
392 release_sock(sk_); \ 392 release_sock(sk_); \
393 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ 393 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
394 sched_annotate_sleep(); \ 394 sched_annotate_sleep(); \
@@ -1677,7 +1677,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
1677static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1677static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1678{ 1678{
1679 struct sock *sk = sock->sk; 1679 struct sock *sk = sock->sk;
1680 DEFINE_WAIT(wait); 1680 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1681 long timeo = *timeop; 1681 long timeo = *timeop;
1682 int err = sock_error(sk); 1682 int err = sock_error(sk);
1683 1683
@@ -1685,15 +1685,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1685 return err; 1685 return err;
1686 1686
1687 for (;;) { 1687 for (;;) {
1688 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1689 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 1688 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1690 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1689 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1691 err = -ENOTCONN; 1690 err = -ENOTCONN;
1692 break; 1691 break;
1693 } 1692 }
1693 add_wait_queue(sk_sleep(sk), &wait);
1694 release_sock(sk); 1694 release_sock(sk);
1695 timeo = schedule_timeout(timeo); 1695 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1696 sched_annotate_sleep();
1696 lock_sock(sk); 1697 lock_sock(sk);
1698 remove_wait_queue(sk_sleep(sk), &wait);
1697 } 1699 }
1698 err = 0; 1700 err = 0;
1699 if (!skb_queue_empty(&sk->sk_receive_queue)) 1701 if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -1709,7 +1711,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1709 if (err) 1711 if (err)
1710 break; 1712 break;
1711 } 1713 }
1712 finish_wait(sk_sleep(sk), &wait);
1713 *timeop = timeo; 1714 *timeop = timeo;
1714 return err; 1715 return err;
1715} 1716}
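
tipc_wait_for_rcvmsg() above switches from prepare_to_wait()/schedule_timeout() to the wait_woken() idiom, which keeps a WQ_FLAG_WOKEN flag in the wait entry so a wakeup arriving after release_sock() but before the sleep is not lost. A stripped-down sketch of the pattern, with data_ready() standing in for the real "receive queue not empty" test:

	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeo = MAX_SCHEDULE_TIMEOUT;

	while (!data_ready(sk)) {
		add_wait_queue(sk_sleep(sk), &wait);
		release_sock(sk);
		/* wait_woken() checks WQ_FLAG_WOKEN before sleeping, so a
		 * wakeup delivered right after release_sock() still counts */
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		remove_wait_queue(sk_sleep(sk), &wait);
		if (!timeo || signal_pending(current))
			break;
	}
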
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 74d1eed7cbd4..a95d479caeea 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -890,7 +890,7 @@ retry:
890 addr->hash ^= sk->sk_type; 890 addr->hash ^= sk->sk_type;
891 891
892 __unix_remove_socket(sk); 892 __unix_remove_socket(sk);
893 u->addr = addr; 893 smp_store_release(&u->addr, addr);
894 __unix_insert_socket(&unix_socket_table[addr->hash], sk); 894 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
895 spin_unlock(&unix_table_lock); 895 spin_unlock(&unix_table_lock);
896 err = 0; 896 err = 0;
@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1060 1060
1061 err = 0; 1061 err = 0;
1062 __unix_remove_socket(sk); 1062 __unix_remove_socket(sk);
1063 u->addr = addr; 1063 smp_store_release(&u->addr, addr);
1064 __unix_insert_socket(list, sk); 1064 __unix_insert_socket(list, sk);
1065 1065
1066out_unlock: 1066out_unlock:
@@ -1331,15 +1331,29 @@ restart:
1331 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); 1331 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1332 otheru = unix_sk(other); 1332 otheru = unix_sk(other);
1333 1333
1334 /* copy address information from listening to new sock*/ 1334 /* copy address information from listening to new sock
1335 if (otheru->addr) { 1335 *
1336 refcount_inc(&otheru->addr->refcnt); 1336 * The contents of *(otheru->addr) and otheru->path
1337 newu->addr = otheru->addr; 1337 * are seen fully set up here, since we have found
1338 } 1338 * otheru in hash under unix_table_lock. Insertion
1339 * into the hash chain we'd found it in had been done
1340 * in an earlier critical area protected by unix_table_lock,
1341 * the same one where we'd set *(otheru->addr) contents,
1342 * as well as otheru->path and otheru->addr itself.
1343 *
1344 * Using smp_store_release() here to set newu->addr
1345 * is enough to make those stores, as well as stores
1346 * to newu->path visible to anyone who gets newu->addr
1347 * by smp_load_acquire(). IOW, the same warranties
1348 * as for unix_sock instances bound in unix_bind() or
1349 * in unix_autobind().
1350 */
1339 if (otheru->path.dentry) { 1351 if (otheru->path.dentry) {
1340 path_get(&otheru->path); 1352 path_get(&otheru->path);
1341 newu->path = otheru->path; 1353 newu->path = otheru->path;
1342 } 1354 }
1355 refcount_inc(&otheru->addr->refcnt);
1356 smp_store_release(&newu->addr, otheru->addr);
1343 1357
1344 /* Set credentials */ 1358 /* Set credentials */
1345 copy_peercred(sk, other); 1359 copy_peercred(sk, other);
@@ -1453,7 +1467,7 @@ out:
1453static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) 1467static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1454{ 1468{
1455 struct sock *sk = sock->sk; 1469 struct sock *sk = sock->sk;
1456 struct unix_sock *u; 1470 struct unix_address *addr;
1457 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); 1471 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1458 int err = 0; 1472 int err = 0;
1459 1473
@@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1468 sock_hold(sk); 1482 sock_hold(sk);
1469 } 1483 }
1470 1484
1471 u = unix_sk(sk); 1485 addr = smp_load_acquire(&unix_sk(sk)->addr);
1472 unix_state_lock(sk); 1486 if (!addr) {
1473 if (!u->addr) {
1474 sunaddr->sun_family = AF_UNIX; 1487 sunaddr->sun_family = AF_UNIX;
1475 sunaddr->sun_path[0] = 0; 1488 sunaddr->sun_path[0] = 0;
1476 err = sizeof(short); 1489 err = sizeof(short);
1477 } else { 1490 } else {
1478 struct unix_address *addr = u->addr;
1479
1480 err = addr->len; 1491 err = addr->len;
1481 memcpy(sunaddr, addr->name, addr->len); 1492 memcpy(sunaddr, addr->name, addr->len);
1482 } 1493 }
1483 unix_state_unlock(sk);
1484 sock_put(sk); 1494 sock_put(sk);
1485out: 1495out:
1486 return err; 1496 return err;
@@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2073 2083
2074static void unix_copy_addr(struct msghdr *msg, struct sock *sk) 2084static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2075{ 2085{
2076 struct unix_sock *u = unix_sk(sk); 2086 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2077 2087
2078 if (u->addr) { 2088 if (addr) {
2079 msg->msg_namelen = u->addr->len; 2089 msg->msg_namelen = addr->len;
2080 memcpy(msg->msg_name, u->addr->name, u->addr->len); 2090 memcpy(msg->msg_name, addr->name, addr->len);
2081 } 2091 }
2082} 2092}
2083 2093
@@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk)
2581 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 2591 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2582 return -EPERM; 2592 return -EPERM;
2583 2593
2584 unix_state_lock(sk); 2594 if (!smp_load_acquire(&unix_sk(sk)->addr))
2595 return -ENOENT;
2596
2585 path = unix_sk(sk)->path; 2597 path = unix_sk(sk)->path;
2586 if (!path.dentry) { 2598 if (!path.dentry)
2587 unix_state_unlock(sk);
2588 return -ENOENT; 2599 return -ENOENT;
2589 }
2590 2600
2591 path_get(&path); 2601 path_get(&path);
2592 unix_state_unlock(sk);
2593 2602
2594 fd = get_unused_fd_flags(O_CLOEXEC); 2603 fd = get_unused_fd_flags(O_CLOEXEC);
2595 if (fd < 0) 2604 if (fd < 0)
@@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
2830 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING), 2839 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2831 sock_i_ino(s)); 2840 sock_i_ino(s));
2832 2841
2833 if (u->addr) { 2842 if (u->addr) { // under unix_table_lock here
2834 int i, len; 2843 int i, len;
2835 seq_putc(seq, ' '); 2844 seq_putc(seq, ' ');
2836 2845
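
The af_unix changes replace locked reads of u->addr with a release/acquire pairing: the address and path are fully written first, then the pointer is published with smp_store_release(), and lockless readers fetch it with smp_load_acquire() so they are guaranteed to see the initialised contents. A generic sketch of the publish/consume pattern, where struct blob, init_blob() and use_blob() are stand-ins:

struct holder {
	struct blob *ptr;
};

static int publish(struct holder *h)
{
	struct blob *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return -ENOMEM;
	init_blob(b);			/* all fields written first ...      */
	smp_store_release(&h->ptr, b);	/* ... then the pointer is published */
	return 0;
}

static void consume(struct holder *h)
{
	struct blob *b = smp_load_acquire(&h->ptr);	/* pairs with the release */

	if (b)
		use_blob(b);		/* sees the fully initialised fields */
}
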
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e83462..3183d9b8ab33 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
10 10
11static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) 11static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
12{ 12{
13 struct unix_address *addr = unix_sk(sk)->addr; 13 /* might or might not have unix_table_lock */
14 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
14 15
15 if (!addr) 16 if (!addr)
16 return 0; 17 return 0;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index c361ce782412..c3d5ab01fba7 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
1651 1651
1652static void vmci_transport_destruct(struct vsock_sock *vsk) 1652static void vmci_transport_destruct(struct vsock_sock *vsk)
1653{ 1653{
1654 /* transport can be NULL if we hit a failure at init() time */
1655 if (!vmci_trans(vsk))
1656 return;
1657
1654 /* Ensure that the detach callback doesn't use the sk/vsk 1658 /* Ensure that the detach callback doesn't use the sk/vsk
1655 * we are about to destruct. 1659 * we are about to destruct.
1656 */ 1660 */
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 623dfe5e211c..b36ad8efb5e5 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1068,6 +1068,8 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync)
1068 1068
1069 ASSERT_RTNL(); 1069 ASSERT_RTNL();
1070 1070
1071 flush_work(&wdev->pmsr_free_wk);
1072
1071 nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); 1073 nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
1072 1074
1073 list_del_rcu(&wdev->list); 1075 list_del_rcu(&wdev->list);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 74150ad95823..d91a408db113 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -250,7 +250,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
250 [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = 250 [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] =
251 NLA_POLICY_MAX(NLA_U8, 15), 251 NLA_POLICY_MAX(NLA_U8, 15),
252 [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = 252 [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] =
253 NLA_POLICY_MAX(NLA_U8, 15), 253 NLA_POLICY_MAX(NLA_U8, 31),
254 [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, 254 [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 },
255 [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, 255 [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG },
256 [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, 256 [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG },
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index de9286703280..0216ab555249 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -256,8 +256,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
256 if (err) 256 if (err)
257 goto out_err; 257 goto out_err;
258 } else { 258 } else {
259 memcpy(req->mac_addr, nla_data(info->attrs[NL80211_ATTR_MAC]), 259 memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
260 ETH_ALEN);
261 memset(req->mac_addr_mask, 0xff, ETH_ALEN); 260 memset(req->mac_addr_mask, 0xff, ETH_ALEN);
262 } 261 }
263 262
@@ -272,6 +271,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
272 271
273 req->n_peers = count; 272 req->n_peers = count;
274 req->cookie = cfg80211_assign_cookie(rdev); 273 req->cookie = cfg80211_assign_cookie(rdev);
274 req->nl_portid = info->snd_portid;
275 275
276 err = rdev_start_pmsr(rdev, wdev, req); 276 err = rdev_start_pmsr(rdev, wdev, req);
277 if (err) 277 if (err)
@@ -530,14 +530,14 @@ free:
530} 530}
531EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); 531EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
532 532
533void cfg80211_pmsr_free_wk(struct work_struct *work) 533static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev)
534{ 534{
535 struct wireless_dev *wdev = container_of(work, struct wireless_dev,
536 pmsr_free_wk);
537 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 535 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
538 struct cfg80211_pmsr_request *req, *tmp; 536 struct cfg80211_pmsr_request *req, *tmp;
539 LIST_HEAD(free_list); 537 LIST_HEAD(free_list);
540 538
539 lockdep_assert_held(&wdev->mtx);
540
541 spin_lock_bh(&wdev->pmsr_lock); 541 spin_lock_bh(&wdev->pmsr_lock);
542 list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { 542 list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
543 if (req->nl_portid) 543 if (req->nl_portid)
@@ -547,14 +547,22 @@ void cfg80211_pmsr_free_wk(struct work_struct *work)
547 spin_unlock_bh(&wdev->pmsr_lock); 547 spin_unlock_bh(&wdev->pmsr_lock);
548 548
549 list_for_each_entry_safe(req, tmp, &free_list, list) { 549 list_for_each_entry_safe(req, tmp, &free_list, list) {
550 wdev_lock(wdev);
551 rdev_abort_pmsr(rdev, wdev, req); 550 rdev_abort_pmsr(rdev, wdev, req);
552 wdev_unlock(wdev);
553 551
554 kfree(req); 552 kfree(req);
555 } 553 }
556} 554}
557 555
556void cfg80211_pmsr_free_wk(struct work_struct *work)
557{
558 struct wireless_dev *wdev = container_of(work, struct wireless_dev,
559 pmsr_free_wk);
560
561 wdev_lock(wdev);
562 cfg80211_pmsr_process_abort(wdev);
563 wdev_unlock(wdev);
564}
565
558void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) 566void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
559{ 567{
560 struct cfg80211_pmsr_request *req; 568 struct cfg80211_pmsr_request *req;
@@ -568,8 +576,8 @@ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
568 spin_unlock_bh(&wdev->pmsr_lock); 576 spin_unlock_bh(&wdev->pmsr_lock);
569 577
570 if (found) 578 if (found)
571 schedule_work(&wdev->pmsr_free_wk); 579 cfg80211_pmsr_process_abort(wdev);
572 flush_work(&wdev->pmsr_free_wk); 580
573 WARN_ON(!list_empty(&wdev->pmsr_list)); 581 WARN_ON(!list_empty(&wdev->pmsr_list));
574} 582}
575 583
diff --git a/net/wireless/util.c b/net/wireless/util.c
index cd48cdd582c0..ec30e3732c7b 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -5,7 +5,7 @@
5 * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright 2017 Intel Deutschland GmbH 7 * Copyright 2017 Intel Deutschland GmbH
8 * Copyright (C) 2018 Intel Corporation 8 * Copyright (C) 2018-2019 Intel Corporation
9 */ 9 */
10#include <linux/export.h> 10#include <linux/export.h>
11#include <linux/bitops.h> 11#include <linux/bitops.h>
@@ -19,6 +19,7 @@
19#include <linux/mpls.h> 19#include <linux/mpls.h>
20#include <linux/gcd.h> 20#include <linux/gcd.h>
21#include <linux/bitfield.h> 21#include <linux/bitfield.h>
22#include <linux/nospec.h>
22#include "core.h" 23#include "core.h"
23#include "rdev-ops.h" 24#include "rdev-ops.h"
24 25
@@ -715,20 +716,25 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
715{ 716{
716 unsigned int dscp; 717 unsigned int dscp;
717 unsigned char vlan_priority; 718 unsigned char vlan_priority;
719 unsigned int ret;
718 720
719 /* skb->priority values from 256->263 are magic values to 721 /* skb->priority values from 256->263 are magic values to
720 * directly indicate a specific 802.1d priority. This is used 722 * directly indicate a specific 802.1d priority. This is used
721 * to allow 802.1d priority to be passed directly in from VLAN 723 * to allow 802.1d priority to be passed directly in from VLAN
722 * tags, etc. 724 * tags, etc.
723 */ 725 */
724 if (skb->priority >= 256 && skb->priority <= 263) 726 if (skb->priority >= 256 && skb->priority <= 263) {
725 return skb->priority - 256; 727 ret = skb->priority - 256;
728 goto out;
729 }
726 730
727 if (skb_vlan_tag_present(skb)) { 731 if (skb_vlan_tag_present(skb)) {
728 vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) 732 vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK)
729 >> VLAN_PRIO_SHIFT; 733 >> VLAN_PRIO_SHIFT;
730 if (vlan_priority > 0) 734 if (vlan_priority > 0) {
731 return vlan_priority; 735 ret = vlan_priority;
736 goto out;
737 }
732 } 738 }
733 739
734 switch (skb->protocol) { 740 switch (skb->protocol) {
@@ -747,8 +753,9 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
747 if (!mpls) 753 if (!mpls)
748 return 0; 754 return 0;
749 755
750 return (ntohl(mpls->entry) & MPLS_LS_TC_MASK) 756 ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK)
751 >> MPLS_LS_TC_SHIFT; 757 >> MPLS_LS_TC_SHIFT;
758 goto out;
752 } 759 }
753 case htons(ETH_P_80221): 760 case htons(ETH_P_80221):
754 /* 802.21 is always network control traffic */ 761 /* 802.21 is always network control traffic */
@@ -761,18 +768,24 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
761 unsigned int i, tmp_dscp = dscp >> 2; 768 unsigned int i, tmp_dscp = dscp >> 2;
762 769
763 for (i = 0; i < qos_map->num_des; i++) { 770 for (i = 0; i < qos_map->num_des; i++) {
764 if (tmp_dscp == qos_map->dscp_exception[i].dscp) 771 if (tmp_dscp == qos_map->dscp_exception[i].dscp) {
765 return qos_map->dscp_exception[i].up; 772 ret = qos_map->dscp_exception[i].up;
773 goto out;
774 }
766 } 775 }
767 776
768 for (i = 0; i < 8; i++) { 777 for (i = 0; i < 8; i++) {
769 if (tmp_dscp >= qos_map->up[i].low && 778 if (tmp_dscp >= qos_map->up[i].low &&
770 tmp_dscp <= qos_map->up[i].high) 779 tmp_dscp <= qos_map->up[i].high) {
771 return i; 780 ret = i;
781 goto out;
782 }
772 } 783 }
773 } 784 }
774 785
775 return dscp >> 5; 786 ret = dscp >> 5;
787out:
788 return array_index_nospec(ret, IEEE80211_NUM_TIDS);
776} 789}
777EXPORT_SYMBOL(cfg80211_classify8021d); 790EXPORT_SYMBOL(cfg80211_classify8021d);
778 791
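
cfg80211_classify8021d() above is reshaped so every result funnels through one exit, where array_index_nospec() clamps it against IEEE80211_NUM_TIDS; the returned priority later indexes per-TID arrays, and the clamp stops a mispredicted bounds check from steering a speculative out-of-bounds load. The canonical use of the helper, with table[] and NUM_ENTRIES as stand-ins:

#include <linux/nospec.h>

static u32 lookup(const u32 *table, unsigned int idx)
{
	if (idx >= NUM_ENTRIES)
		return 0;
	/* the branch above may be speculated past; clamp idx so the
	 * speculative path cannot read table[] out of bounds */
	idx = array_index_nospec(idx, NUM_ENTRIES);
	return table[idx];
}
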
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5121729b8b63..eff31348e20b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
352 unsigned int lci = 1; 352 unsigned int lci = 1;
353 struct sock *sk; 353 struct sock *sk;
354 354
355 read_lock_bh(&x25_list_lock); 355 while ((sk = x25_find_socket(lci, nb)) != NULL) {
356
357 while ((sk = __x25_find_socket(lci, nb)) != NULL) {
358 sock_put(sk); 356 sock_put(sk);
359 if (++lci == 4096) { 357 if (++lci == 4096) {
360 lci = 0; 358 lci = 0;
361 break; 359 break;
362 } 360 }
361 cond_resched();
363 } 362 }
364 363
365 read_unlock_bh(&x25_list_lock);
366 return lci; 364 return lci;
367} 365}
368 366
@@ -681,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
681 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; 679 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
682 int len, i, rc = 0; 680 int len, i, rc = 0;
683 681
684 if (!sock_flag(sk, SOCK_ZAPPED) || 682 if (addr_len != sizeof(struct sockaddr_x25) ||
685 addr_len != sizeof(struct sockaddr_x25) ||
686 addr->sx25_family != AF_X25) { 683 addr->sx25_family != AF_X25) {
687 rc = -EINVAL; 684 rc = -EINVAL;
688 goto out; 685 goto out;
@@ -701,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
701 } 698 }
702 699
703 lock_sock(sk); 700 lock_sock(sk);
704 x25_sk(sk)->source_addr = addr->sx25_addr; 701 if (sock_flag(sk, SOCK_ZAPPED)) {
705 x25_insert_socket(sk); 702 x25_sk(sk)->source_addr = addr->sx25_addr;
706 sock_reset_flag(sk, SOCK_ZAPPED); 703 x25_insert_socket(sk);
704 sock_reset_flag(sk, SOCK_ZAPPED);
705 } else {
706 rc = -EINVAL;
707 }
707 release_sock(sk); 708 release_sock(sk);
708 SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); 709 SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
709out: 710out:
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index d4de871e7d4d..37e1fe180769 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -125,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
125 return 0; 125 return 0;
126 126
127err_unreg_umem: 127err_unreg_umem:
128 xdp_clear_umem_at_qid(dev, queue_id);
129 if (!force_zc) 128 if (!force_zc)
130 err = 0; /* fallback to copy mode */ 129 err = 0; /* fallback to copy mode */
130 if (err)
131 xdp_clear_umem_at_qid(dev, queue_id);
131out_rtnl_unlock: 132out_rtnl_unlock:
132 rtnl_unlock(); 133 rtnl_unlock();
133 return err; 134 return err;
@@ -259,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
259 if (!umem->pgs) 260 if (!umem->pgs)
260 return -ENOMEM; 261 return -ENOMEM;
261 262
262 down_write(&current->mm->mmap_sem); 263 down_read(&current->mm->mmap_sem);
263 npgs = get_user_pages(umem->address, umem->npgs, 264 npgs = get_user_pages_longterm(umem->address, umem->npgs,
264 gup_flags, &umem->pgs[0], NULL); 265 gup_flags, &umem->pgs[0], NULL);
265 up_write(&current->mm->mmap_sem); 266 up_read(&current->mm->mmap_sem);
266 267
267 if (npgs != umem->npgs) { 268 if (npgs != umem->npgs) {
268 if (npgs >= 0) { 269 if (npgs >= 0) {
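
xdp_umem_pin_pages() above makes two related changes: get_user_pages() only needs mmap_sem held for read, and the pin switches to the longterm variant because the umem keeps the pages for its whole lifetime (get_user_pages_longterm() was the interface at the time; later kernels use pin_user_pages() for the same purpose). A hedged sketch of the pinning step, with FOLL_WRITE standing in for the gup_flags the driver actually computes:

	long npgs;
	unsigned int i;

	down_read(&current->mm->mmap_sem);	/* read lock is enough for gup */
	npgs = get_user_pages_longterm(addr, nr_pages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (npgs < 0)
		return npgs;
	if (npgs != nr_pages) {
		for (i = 0; i < npgs; i++)	/* partial pin: undo and fail */
			put_page(pages[i]);
		return -ENOMEM;
	}
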
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a03268454a27..85e4fe4f18cc 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock)
366 366
367 xskq_destroy(xs->rx); 367 xskq_destroy(xs->rx);
368 xskq_destroy(xs->tx); 368 xskq_destroy(xs->tx);
369 xdp_put_umem(xs->umem);
370 369
371 sock_orphan(sk); 370 sock_orphan(sk);
372 sock->sk = NULL; 371 sock->sk = NULL;
@@ -669,6 +668,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
669 if (!umem) 668 if (!umem)
670 return -EINVAL; 669 return -EINVAL;
671 670
671 /* Matches the smp_wmb() in XDP_UMEM_REG */
672 smp_rmb();
672 if (offset == XDP_UMEM_PGOFF_FILL_RING) 673 if (offset == XDP_UMEM_PGOFF_FILL_RING)
673 q = READ_ONCE(umem->fq); 674 q = READ_ONCE(umem->fq);
674 else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) 675 else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
@@ -678,6 +679,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
678 if (!q) 679 if (!q)
679 return -EINVAL; 680 return -EINVAL;
680 681
682 /* Matches the smp_wmb() in xsk_init_queue */
683 smp_rmb();
681 qpg = virt_to_head_page(q->ring); 684 qpg = virt_to_head_page(q->ring);
682 if (size > (PAGE_SIZE << compound_order(qpg))) 685 if (size > (PAGE_SIZE << compound_order(qpg)))
683 return -EINVAL; 686 return -EINVAL;
@@ -714,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = {
714 .sendpage = sock_no_sendpage, 717 .sendpage = sock_no_sendpage,
715}; 718};
716 719
720static void xsk_destruct(struct sock *sk)
721{
722 struct xdp_sock *xs = xdp_sk(sk);
723
724 if (!sock_flag(sk, SOCK_DEAD))
725 return;
726
727 xdp_put_umem(xs->umem);
728
729 sk_refcnt_debug_dec(sk);
730}
731
717static int xsk_create(struct net *net, struct socket *sock, int protocol, 732static int xsk_create(struct net *net, struct socket *sock, int protocol,
718 int kern) 733 int kern)
719{ 734{
@@ -740,6 +755,9 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
740 755
741 sk->sk_family = PF_XDP; 756 sk->sk_family = PF_XDP;
742 757
758 sk->sk_destruct = xsk_destruct;
759 sk_refcnt_debug_inc(sk);
760
743 sock_set_flag(sk, SOCK_RCU_FREE); 761 sock_set_flag(sk, SOCK_RCU_FREE);
744 762
745 xs = xdp_sk(sk); 763 xs = xdp_sk(sk);
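
Two things change in the xsk hunks: umem release moves into a sk_destruct callback so it only happens when the last reference to the socket is dropped, and xsk_mmap() gains smp_rmb() calls that pair with the smp_wmb() issued when the umem and its queues were published. A minimal sketch of that write/read barrier pairing, with a hypothetical struct ctx holding a ring pointer and a ready flag:

/* setup path: fill in the ring, then publish it */
static void publish_ring(struct ctx *c, struct ring *r)
{
	c->ring = r;
	smp_wmb();			/* ring visible before 'ready' */
	WRITE_ONCE(c->ready, true);
}

/* mmap/consume path: */
static struct ring *get_ring(struct ctx *c)
{
	if (!READ_ONCE(c->ready))
		return NULL;
	smp_rmb();			/* pairs with smp_wmb() above */
	return c->ring;			/* safe to use now */
}
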
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 6be8c7df15bb..dbb3c1945b5c 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
76 int ifindex; 76 int ifindex;
77 struct xfrm_if *xi; 77 struct xfrm_if *xi;
78 78
79 if (!skb->dev) 79 if (!secpath_exists(skb) || !skb->dev)
80 return NULL; 80 return NULL;
81 81
82 xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id); 82 xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
83 ifindex = skb->dev->ifindex; 83 ifindex = skb->dev->ifindex;
84 84
85 for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { 85 for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ba0a4048c846..8d1a898d0ba5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3314,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3314 3314
3315 if (ifcb) { 3315 if (ifcb) {
3316 xi = ifcb->decode_session(skb); 3316 xi = ifcb->decode_session(skb);
3317 if (xi) 3317 if (xi) {
3318 if_id = xi->p.if_id; 3318 if_id = xi->p.if_id;
3319 net = xi->net;
3320 }
3319 } 3321 }
3320 rcu_read_unlock(); 3322 rcu_read_unlock();
3321 3323
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 23c92891758a..1bb971f46fc6 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x)
432} 432}
433EXPORT_SYMBOL(xfrm_state_free); 433EXPORT_SYMBOL(xfrm_state_free);
434 434
435static void xfrm_state_gc_destroy(struct xfrm_state *x) 435static void ___xfrm_state_destroy(struct xfrm_state *x)
436{ 436{
437 tasklet_hrtimer_cancel(&x->mtimer); 437 tasklet_hrtimer_cancel(&x->mtimer);
438 del_timer_sync(&x->rtimer); 438 del_timer_sync(&x->rtimer);
@@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
474 synchronize_rcu(); 474 synchronize_rcu();
475 475
476 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) 476 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
477 xfrm_state_gc_destroy(x); 477 ___xfrm_state_destroy(x);
478} 478}
479 479
480static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) 480static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
@@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
598} 598}
599EXPORT_SYMBOL(xfrm_state_alloc); 599EXPORT_SYMBOL(xfrm_state_alloc);
600 600
601void __xfrm_state_destroy(struct xfrm_state *x) 601void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
602{ 602{
603 WARN_ON(x->km.state != XFRM_STATE_DEAD); 603 WARN_ON(x->km.state != XFRM_STATE_DEAD);
604 604
605 spin_lock_bh(&xfrm_state_gc_lock); 605 if (sync) {
606 hlist_add_head(&x->gclist, &xfrm_state_gc_list); 606 synchronize_rcu();
607 spin_unlock_bh(&xfrm_state_gc_lock); 607 ___xfrm_state_destroy(x);
608 schedule_work(&xfrm_state_gc_work); 608 } else {
609 spin_lock_bh(&xfrm_state_gc_lock);
610 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
611 spin_unlock_bh(&xfrm_state_gc_lock);
612 schedule_work(&xfrm_state_gc_work);
613 }
609} 614}
610EXPORT_SYMBOL(__xfrm_state_destroy); 615EXPORT_SYMBOL(__xfrm_state_destroy);
611 616
@@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
708} 713}
709#endif 714#endif
710 715
711int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) 716int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
712{ 717{
713 int i, err = 0, cnt = 0; 718 int i, err = 0, cnt = 0;
714 719
@@ -730,7 +735,10 @@ restart:
730 err = xfrm_state_delete(x); 735 err = xfrm_state_delete(x);
731 xfrm_audit_state_delete(x, err ? 0 : 1, 736 xfrm_audit_state_delete(x, err ? 0 : 1,
732 task_valid); 737 task_valid);
733 xfrm_state_put(x); 738 if (sync)
739 xfrm_state_put_sync(x);
740 else
741 xfrm_state_put(x);
734 if (!err) 742 if (!err)
735 cnt++; 743 cnt++;
736 744
@@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
2215 if (atomic_read(&t->tunnel_users) == 2) 2223 if (atomic_read(&t->tunnel_users) == 2)
2216 xfrm_state_delete(t); 2224 xfrm_state_delete(t);
2217 atomic_dec(&t->tunnel_users); 2225 atomic_dec(&t->tunnel_users);
2218 xfrm_state_put(t); 2226 xfrm_state_put_sync(t);
2219 x->tunnel = NULL; 2227 x->tunnel = NULL;
2220 } 2228 }
2221} 2229}
@@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net)
2375 unsigned int sz; 2383 unsigned int sz;
2376 2384
2377 flush_work(&net->xfrm.state_hash_work); 2385 flush_work(&net->xfrm.state_hash_work);
2378 xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
2379 flush_work(&xfrm_state_gc_work); 2386 flush_work(&xfrm_state_gc_work);
2387 xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
2380 2388
2381 WARN_ON(!list_empty(&net->xfrm.state_all)); 2389 WARN_ON(!list_empty(&net->xfrm.state_all));
2382 2390
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c6d26afcf89d..a131f9ff979e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1932,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1932 struct xfrm_usersa_flush *p = nlmsg_data(nlh); 1932 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1933 int err; 1933 int err;
1934 1934
1935 err = xfrm_state_flush(net, p->proto, true); 1935 err = xfrm_state_flush(net, p->proto, true, false);
1936 if (err) { 1936 if (err) {
1937 if (err == -ESRCH) /* empty table */ 1937 if (err == -ESRCH) /* empty table */
1938 return 0; 1938 return 0;
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 77cebad0474e..f75e7bda4889 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -118,8 +118,8 @@ static int read_symbol(FILE *in, struct sym_entry *s)
118 fprintf(stderr, "Read error or end of file.\n"); 118 fprintf(stderr, "Read error or end of file.\n");
119 return -1; 119 return -1;
120 } 120 }
121 if (strlen(sym) > KSYM_NAME_LEN) { 121 if (strlen(sym) >= KSYM_NAME_LEN) {
122 fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n" 122 fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n"
123 "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n", 123 "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
124 sym, strlen(sym), KSYM_NAME_LEN); 124 sym, strlen(sym), KSYM_NAME_LEN);
125 return -1; 125 return -1;
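
The kallsyms change is an off-by-one fix: a symbol name of exactly KSYM_NAME_LEN characters still needs a byte for the terminating NUL, so names must be rejected at >= rather than >. The same rule in miniature, for any fixed-size string buffer:

	char buf[KSYM_NAME_LEN];

	if (strlen(sym) >= sizeof(buf))		/* '==' would leave no room for '\0' */
		return -1;
	strcpy(buf, sym);			/* now guaranteed to fit, NUL included */
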
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 479909b858c7..8f533c81aa8d 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -186,20 +186,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm)
186 return key_task_permission(key_ref, current_cred(), perm); 186 return key_task_permission(key_ref, current_cred(), perm);
187} 187}
188 188
189/*
190 * Authorisation record for request_key().
191 */
192struct request_key_auth {
193 struct key *target_key;
194 struct key *dest_keyring;
195 const struct cred *cred;
196 void *callout_info;
197 size_t callout_len;
198 pid_t pid;
199} __randomize_layout;
200
201extern struct key_type key_type_request_key_auth; 189extern struct key_type key_type_request_key_auth;
202extern struct key *request_key_auth_new(struct key *target, 190extern struct key *request_key_auth_new(struct key *target,
191 const char *op,
203 const void *callout_info, 192 const void *callout_info,
204 size_t callout_len, 193 size_t callout_len,
205 struct key *dest_keyring); 194 struct key *dest_keyring);
diff --git a/security/keys/key.c b/security/keys/key.c
index 44a80d6741a1..696f1c092c50 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
265 265
266 spin_lock(&user->lock); 266 spin_lock(&user->lock);
267 if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { 267 if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
268 if (user->qnkeys + 1 >= maxkeys || 268 if (user->qnkeys + 1 > maxkeys ||
269 user->qnbytes + quotalen >= maxbytes || 269 user->qnbytes + quotalen > maxbytes ||
270 user->qnbytes + quotalen < user->qnbytes) 270 user->qnbytes + quotalen < user->qnbytes)
271 goto no_quota; 271 goto no_quota;
272 } 272 }
@@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
297 key->gid = gid; 297 key->gid = gid;
298 key->perm = perm; 298 key->perm = perm;
299 key->restrict_link = restrict_link; 299 key->restrict_link = restrict_link;
300 key->last_used_at = ktime_get_real_seconds();
300 301
301 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) 302 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
302 key->flags |= 1 << KEY_FLAG_IN_QUOTA; 303 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index e8093d025966..7bbe03593e58 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -25,6 +25,7 @@
25#include <linux/security.h> 25#include <linux/security.h>
26#include <linux/uio.h> 26#include <linux/uio.h>
27#include <linux/uaccess.h> 27#include <linux/uaccess.h>
28#include <keys/request_key_auth-type.h>
28#include "internal.h" 29#include "internal.h"
29 30
30#define KEY_MAX_DESC_SIZE 4096 31#define KEY_MAX_DESC_SIZE 4096
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index eadebb92986a..f81372f53dd7 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
661 BUG_ON((ctx->flags & STATE_CHECKS) == 0 || 661 BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
662 (ctx->flags & STATE_CHECKS) == STATE_CHECKS); 662 (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
663 663
664 if (ctx->index_key.description)
665 ctx->index_key.desc_len = strlen(ctx->index_key.description);
666
667 /* Check to see if this top-level keyring is what we are looking for 664 /* Check to see if this top-level keyring is what we are looking for
668 * and whether it is valid or not. 665 * and whether it is valid or not.
669 */ 666 */
@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
914 struct keyring_search_context ctx = { 911 struct keyring_search_context ctx = {
915 .index_key.type = type, 912 .index_key.type = type,
916 .index_key.description = description, 913 .index_key.description = description,
914 .index_key.desc_len = strlen(description),
917 .cred = current_cred(), 915 .cred = current_cred(),
918 .match_data.cmp = key_default_cmp, 916 .match_data.cmp = key_default_cmp,
919 .match_data.raw_data = description, 917 .match_data.raw_data = description,
diff --git a/security/keys/proc.c b/security/keys/proc.c
index d2b802072693..78ac305d715e 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -165,8 +165,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
165 int rc; 165 int rc;
166 166
167 struct keyring_search_context ctx = { 167 struct keyring_search_context ctx = {
168 .index_key.type = key->type, 168 .index_key = key->index_key,
169 .index_key.description = key->description,
170 .cred = m->file->f_cred, 169 .cred = m->file->f_cred,
171 .match_data.cmp = lookup_user_key_possessed, 170 .match_data.cmp = lookup_user_key_possessed,
172 .match_data.raw_data = key, 171 .match_data.raw_data = key,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 02c77e928f68..0e0b9ccad2f8 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -19,6 +19,7 @@
19#include <linux/security.h> 19#include <linux/security.h>
20#include <linux/user_namespace.h> 20#include <linux/user_namespace.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <keys/request_key_auth-type.h>
22#include "internal.h" 23#include "internal.h"
23 24
24/* Session keyring create vs join semaphore */ 25/* Session keyring create vs join semaphore */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 301f0e300dbd..7a0c6b666ff0 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -18,31 +18,30 @@
18#include <linux/keyctl.h> 18#include <linux/keyctl.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include "internal.h" 20#include "internal.h"
21#include <keys/request_key_auth-type.h>
21 22
22#define key_negative_timeout 60 /* default timeout on a negative key's existence */ 23#define key_negative_timeout 60 /* default timeout on a negative key's existence */
23 24
24/** 25/**
25 * complete_request_key - Complete the construction of a key. 26 * complete_request_key - Complete the construction of a key.
26 * @cons: The key construction record. 27 * @auth_key: The authorisation key.
27 * @error: The success or failute of the construction. 28 * @error: The success or failute of the construction.
28 * 29 *
29 * Complete the attempt to construct a key. The key will be negated 30 * Complete the attempt to construct a key. The key will be negated
30 * if an error is indicated. The authorisation key will be revoked 31 * if an error is indicated. The authorisation key will be revoked
31 * unconditionally. 32 * unconditionally.
32 */ 33 */
33void complete_request_key(struct key_construction *cons, int error) 34void complete_request_key(struct key *authkey, int error)
34{ 35{
35 kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error); 36 struct request_key_auth *rka = get_request_key_auth(authkey);
37 struct key *key = rka->target_key;
38
39 kenter("%d{%d},%d", authkey->serial, key->serial, error);
36 40
37 if (error < 0) 41 if (error < 0)
38 key_negate_and_link(cons->key, key_negative_timeout, NULL, 42 key_negate_and_link(key, key_negative_timeout, NULL, authkey);
39 cons->authkey);
40 else 43 else
41 key_revoke(cons->authkey); 44 key_revoke(authkey);
42
43 key_put(cons->key);
44 key_put(cons->authkey);
45 kfree(cons);
46} 45}
47EXPORT_SYMBOL(complete_request_key); 46EXPORT_SYMBOL(complete_request_key);
48 47
@@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
91 * Request userspace finish the construction of a key 90 * Request userspace finish the construction of a key
92 * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" 91 * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
93 */ 92 */
94static int call_sbin_request_key(struct key_construction *cons, 93static int call_sbin_request_key(struct key *authkey, void *aux)
95 const char *op,
96 void *aux)
97{ 94{
98 static char const request_key[] = "/sbin/request-key"; 95 static char const request_key[] = "/sbin/request-key";
96 struct request_key_auth *rka = get_request_key_auth(authkey);
99 const struct cred *cred = current_cred(); 97 const struct cred *cred = current_cred();
100 key_serial_t prkey, sskey; 98 key_serial_t prkey, sskey;
101 struct key *key = cons->key, *authkey = cons->authkey, *keyring, 99 struct key *key = rka->target_key, *keyring, *session;
102 *session;
103 char *argv[9], *envp[3], uid_str[12], gid_str[12]; 100 char *argv[9], *envp[3], uid_str[12], gid_str[12];
104 char key_str[12], keyring_str[3][12]; 101 char key_str[12], keyring_str[3][12];
105 char desc[20]; 102 char desc[20];
106 int ret, i; 103 int ret, i;
107 104
108 kenter("{%d},{%d},%s", key->serial, authkey->serial, op); 105 kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
109 106
110 ret = install_user_keyrings(); 107 ret = install_user_keyrings();
111 if (ret < 0) 108 if (ret < 0)
@@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons,
163 /* set up the argument list */ 160 /* set up the argument list */
164 i = 0; 161 i = 0;
165 argv[i++] = (char *)request_key; 162 argv[i++] = (char *)request_key;
166 argv[i++] = (char *) op; 163 argv[i++] = (char *)rka->op;
167 argv[i++] = key_str; 164 argv[i++] = key_str;
168 argv[i++] = uid_str; 165 argv[i++] = uid_str;
169 argv[i++] = gid_str; 166 argv[i++] = gid_str;
@@ -191,7 +188,7 @@ error_link:
191 key_put(keyring); 188 key_put(keyring);
192 189
193error_alloc: 190error_alloc:
194 complete_request_key(cons, ret); 191 complete_request_key(authkey, ret);
195 kleave(" = %d", ret); 192 kleave(" = %d", ret);
196 return ret; 193 return ret;
197} 194}
@@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info,
205 size_t callout_len, void *aux, 202 size_t callout_len, void *aux,
206 struct key *dest_keyring) 203 struct key *dest_keyring)
207{ 204{
208 struct key_construction *cons;
209 request_key_actor_t actor; 205 request_key_actor_t actor;
210 struct key *authkey; 206 struct key *authkey;
211 int ret; 207 int ret;
212 208
213 kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); 209 kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
214 210
215 cons = kmalloc(sizeof(*cons), GFP_KERNEL);
216 if (!cons)
217 return -ENOMEM;
218
219 /* allocate an authorisation key */ 211 /* allocate an authorisation key */
220 authkey = request_key_auth_new(key, callout_info, callout_len, 212 authkey = request_key_auth_new(key, "create", callout_info, callout_len,
221 dest_keyring); 213 dest_keyring);
222 if (IS_ERR(authkey)) { 214 if (IS_ERR(authkey))
223 kfree(cons); 215 return PTR_ERR(authkey);
224 ret = PTR_ERR(authkey);
225 authkey = NULL;
226 } else {
227 cons->authkey = key_get(authkey);
228 cons->key = key_get(key);
229 216
230 /* make the call */ 217 /* Make the call */
231 actor = call_sbin_request_key; 218 actor = call_sbin_request_key;
232 if (key->type->request_key) 219 if (key->type->request_key)
233 actor = key->type->request_key; 220 actor = key->type->request_key;
234 221
235 ret = actor(cons, "create", aux); 222 ret = actor(authkey, aux);
236 223
237 /* check that the actor called complete_request_key() prior to 224 /* check that the actor called complete_request_key() prior to
238 * returning an error */ 225 * returning an error */
239 WARN_ON(ret < 0 && 226 WARN_ON(ret < 0 &&
240 !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); 227 !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
241 key_put(authkey);
242 }
243 228
229 key_put(authkey);
244 kleave(" = %d", ret); 230 kleave(" = %d", ret);
245 return ret; 231 return ret;
246} 232}
@@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
275 if (cred->request_key_auth) { 261 if (cred->request_key_auth) {
276 authkey = cred->request_key_auth; 262 authkey = cred->request_key_auth;
277 down_read(&authkey->sem); 263 down_read(&authkey->sem);
278 rka = authkey->payload.data[0]; 264 rka = get_request_key_auth(authkey);
279 if (!test_bit(KEY_FLAG_REVOKED, 265 if (!test_bit(KEY_FLAG_REVOKED,
280 &authkey->flags)) 266 &authkey->flags))
281 dest_keyring = 267 dest_keyring =
@@ -545,6 +531,7 @@ struct key *request_key_and_link(struct key_type *type,
545 struct keyring_search_context ctx = { 531 struct keyring_search_context ctx = {
546 .index_key.type = type, 532 .index_key.type = type,
547 .index_key.description = description, 533 .index_key.description = description,
534 .index_key.desc_len = strlen(description),
548 .cred = current_cred(), 535 .cred = current_cred(),
549 .match_data.cmp = key_default_cmp, 536 .match_data.cmp = key_default_cmp,
550 .match_data.raw_data = description, 537 .match_data.raw_data = description,
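
With struct key_construction gone, a key type's ->request_key() actor receives only the authorisation key; the target key, the operation string and the callout data are all reachable through its request_key_auth payload via get_request_key_auth(). A hedged sketch of an actor under the new calling convention, where my_upcall() stands in for however the key type actually services the request:

static int my_request_key(struct key *authkey, void *aux)
{
	struct request_key_auth *rka = get_request_key_auth(authkey);
	struct key *key = rka->target_key;
	int ret;

	ret = my_upcall(key, rka->op, rka->callout_info, rka->callout_len);

	/* negates the key on error and revokes the authkey, as in
	 * call_sbin_request_key() above */
	complete_request_key(authkey, ret);
	return ret;
}
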
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 87ea2f54dedc..bda6201c6c45 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -17,7 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include "internal.h" 19#include "internal.h"
20#include <keys/user-type.h> 20#include <keys/request_key_auth-type.h>
21 21
22static int request_key_auth_preparse(struct key_preparsed_payload *); 22static int request_key_auth_preparse(struct key_preparsed_payload *);
23static void request_key_auth_free_preparse(struct key_preparsed_payload *); 23static void request_key_auth_free_preparse(struct key_preparsed_payload *);
@@ -68,7 +68,7 @@ static int request_key_auth_instantiate(struct key *key,
68static void request_key_auth_describe(const struct key *key, 68static void request_key_auth_describe(const struct key *key,
69 struct seq_file *m) 69 struct seq_file *m)
70{ 70{
71 struct request_key_auth *rka = key->payload.data[0]; 71 struct request_key_auth *rka = get_request_key_auth(key);
72 72
73 seq_puts(m, "key:"); 73 seq_puts(m, "key:");
74 seq_puts(m, key->description); 74 seq_puts(m, key->description);
@@ -83,7 +83,7 @@ static void request_key_auth_describe(const struct key *key,
83static long request_key_auth_read(const struct key *key, 83static long request_key_auth_read(const struct key *key,
84 char __user *buffer, size_t buflen) 84 char __user *buffer, size_t buflen)
85{ 85{
86 struct request_key_auth *rka = key->payload.data[0]; 86 struct request_key_auth *rka = get_request_key_auth(key);
87 size_t datalen; 87 size_t datalen;
88 long ret; 88 long ret;
89 89
@@ -109,7 +109,7 @@ static long request_key_auth_read(const struct key *key,
109 */ 109 */
110static void request_key_auth_revoke(struct key *key) 110static void request_key_auth_revoke(struct key *key)
111{ 111{
112 struct request_key_auth *rka = key->payload.data[0]; 112 struct request_key_auth *rka = get_request_key_auth(key);
113 113
114 kenter("{%d}", key->serial); 114 kenter("{%d}", key->serial);
115 115
@@ -136,7 +136,7 @@ static void free_request_key_auth(struct request_key_auth *rka)
136 */ 136 */
137static void request_key_auth_destroy(struct key *key) 137static void request_key_auth_destroy(struct key *key)
138{ 138{
139 struct request_key_auth *rka = key->payload.data[0]; 139 struct request_key_auth *rka = get_request_key_auth(key);
140 140
141 kenter("{%d}", key->serial); 141 kenter("{%d}", key->serial);
142 142
@@ -147,8 +147,9 @@ static void request_key_auth_destroy(struct key *key)
147 * Create an authorisation token for /sbin/request-key or whoever to gain 147 * Create an authorisation token for /sbin/request-key or whoever to gain
148 * access to the caller's security data. 148 * access to the caller's security data.
149 */ 149 */
150struct key *request_key_auth_new(struct key *target, const void *callout_info, 150struct key *request_key_auth_new(struct key *target, const char *op,
151 size_t callout_len, struct key *dest_keyring) 151 const void *callout_info, size_t callout_len,
152 struct key *dest_keyring)
152{ 153{
153 struct request_key_auth *rka, *irka; 154 struct request_key_auth *rka, *irka;
154 const struct cred *cred = current->cred; 155 const struct cred *cred = current->cred;
@@ -166,6 +167,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
166 if (!rka->callout_info) 167 if (!rka->callout_info)
167 goto error_free_rka; 168 goto error_free_rka;
168 rka->callout_len = callout_len; 169 rka->callout_len = callout_len;
170 strlcpy(rka->op, op, sizeof(rka->op));
169 171
170 /* see if the calling process is already servicing the key request of 172 /* see if the calling process is already servicing the key request of
171 * another process */ 173 * another process */
@@ -245,7 +247,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
245 struct key *authkey; 247 struct key *authkey;
246 key_ref_t authkey_ref; 248 key_ref_t authkey_ref;
247 249
248 sprintf(description, "%x", target_id); 250 ctx.index_key.desc_len = sprintf(description, "%x", target_id);
249 251
250 authkey_ref = search_process_keyrings(&ctx); 252 authkey_ref = search_process_keyrings(&ctx);
251 253
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index f84001019356..33028c098ef3 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
321 if (a->u.net->sk) { 321 if (a->u.net->sk) {
322 struct sock *sk = a->u.net->sk; 322 struct sock *sk = a->u.net->sk;
323 struct unix_sock *u; 323 struct unix_sock *u;
324 struct unix_address *addr;
324 int len = 0; 325 int len = 0;
325 char *p = NULL; 326 char *p = NULL;
326 327
@@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
351#endif 352#endif
352 case AF_UNIX: 353 case AF_UNIX:
353 u = unix_sk(sk); 354 u = unix_sk(sk);
355 addr = smp_load_acquire(&u->addr);
356 if (!addr)
357 break;
354 if (u->path.dentry) { 358 if (u->path.dentry) {
355 audit_log_d_path(ab, " path=", &u->path); 359 audit_log_d_path(ab, " path=", &u->path);
356 break; 360 break;
357 } 361 }
358 if (!u->addr) 362 len = addr->len-sizeof(short);
359 break; 363 p = &addr->name->sun_path[0];
360 len = u->addr->len-sizeof(short);
361 p = &u->addr->name->sun_path[0];
362 audit_log_format(ab, " path="); 364 audit_log_format(ab, " path=");
363 if (*p) 365 if (*p)
364 audit_log_untrustedstring(ab, p); 366 audit_log_untrustedstring(ab, p);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 6c99fa8ac5fa..6c0b30391ba9 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2112,13 +2112,6 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2112 return 0; 2112 return 0;
2113} 2113}
2114 2114
2115/* allow waiting for a capture stream that hasn't been started */
2116#if IS_ENABLED(CONFIG_SND_PCM_OSS)
2117#define wait_capture_start(substream) ((substream)->oss.oss)
2118#else
2119#define wait_capture_start(substream) false
2120#endif
2121
2122/* the common loop for read/write data */ 2115/* the common loop for read/write data */
2123snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, 2116snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2124 void *data, bool interleaved, 2117 void *data, bool interleaved,
@@ -2184,16 +2177,11 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2184 snd_pcm_update_hw_ptr(substream); 2177 snd_pcm_update_hw_ptr(substream);
2185 2178
2186 if (!is_playback && 2179 if (!is_playback &&
2187 runtime->status->state == SNDRV_PCM_STATE_PREPARED) { 2180 runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2188 if (size >= runtime->start_threshold) { 2181 size >= runtime->start_threshold) {
2189 err = snd_pcm_start(substream); 2182 err = snd_pcm_start(substream);
2190 if (err < 0) 2183 if (err < 0)
2191 goto _end_unlock;
2192 } else if (!wait_capture_start(substream)) {
2193 /* nothing to do */
2194 err = 0;
2195 goto _end_unlock; 2184 goto _end_unlock;
2196 }
2197 } 2185 }
2198 2186
2199 avail = snd_pcm_avail(substream); 2187 avail = snd_pcm_avail(substream);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 152f54137082..a4ee7656d9ee 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
924 SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), 924 SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
925 SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), 925 SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
926 SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), 926 SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
927 SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
927 SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), 928 SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
928 SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), 929 SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
929 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), 930 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6df758adff84..1ffa36e987b4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1855,6 +1855,8 @@ enum {
1855 ALC887_FIXUP_BASS_CHMAP, 1855 ALC887_FIXUP_BASS_CHMAP,
1856 ALC1220_FIXUP_GB_DUAL_CODECS, 1856 ALC1220_FIXUP_GB_DUAL_CODECS,
1857 ALC1220_FIXUP_CLEVO_P950, 1857 ALC1220_FIXUP_CLEVO_P950,
1858 ALC1220_FIXUP_SYSTEM76_ORYP5,
1859 ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
1858}; 1860};
1859 1861
1860static void alc889_fixup_coef(struct hda_codec *codec, 1862static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
2056 snd_hda_override_conn_list(codec, 0x1b, 1, conn1); 2058 snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
2057} 2059}
2058 2060
2061static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
2062 const struct hda_fixup *fix, int action);
2063
2064static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
2065 const struct hda_fixup *fix,
2066 int action)
2067{
2068 alc1220_fixup_clevo_p950(codec, fix, action);
2069 alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
2070}
2071
2059static const struct hda_fixup alc882_fixups[] = { 2072static const struct hda_fixup alc882_fixups[] = {
2060 [ALC882_FIXUP_ABIT_AW9D_MAX] = { 2073 [ALC882_FIXUP_ABIT_AW9D_MAX] = {
2061 .type = HDA_FIXUP_PINS, 2074 .type = HDA_FIXUP_PINS,
@@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
2300 .type = HDA_FIXUP_FUNC, 2313 .type = HDA_FIXUP_FUNC,
2301 .v.func = alc1220_fixup_clevo_p950, 2314 .v.func = alc1220_fixup_clevo_p950,
2302 }, 2315 },
2316 [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
2317 .type = HDA_FIXUP_FUNC,
2318 .v.func = alc1220_fixup_system76_oryp5,
2319 },
2320 [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
2321 .type = HDA_FIXUP_PINS,
2322 .v.pins = (const struct hda_pintbl[]) {
2323 { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
2324 {}
2325 },
2326 .chained = true,
2327 .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
2328 },
2303}; 2329};
2304 2330
2305static const struct snd_pci_quirk alc882_fixup_tbl[] = { 2331static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2376 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), 2402 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
2377 SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), 2403 SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
2378 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), 2404 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
2405 SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
2406 SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
2379 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), 2407 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
2380 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), 2408 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
2381 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), 2409 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -5632,6 +5660,7 @@ enum {
5632 ALC294_FIXUP_ASUS_SPK, 5660 ALC294_FIXUP_ASUS_SPK,
5633 ALC225_FIXUP_HEADSET_JACK, 5661 ALC225_FIXUP_HEADSET_JACK,
5634 ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE, 5662 ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
5663 ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
5635}; 5664};
5636 5665
5637static const struct hda_fixup alc269_fixups[] = { 5666static const struct hda_fixup alc269_fixups[] = {
@@ -6587,6 +6616,17 @@ static const struct hda_fixup alc269_fixups[] = {
6587 .chained = true, 6616 .chained = true,
6588 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC 6617 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6589 }, 6618 },
6619 [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
6620 .type = HDA_FIXUP_VERBS,
6621 .v.verbs = (const struct hda_verb[]) {
6622 /* Disable PCBEEP-IN passthrough */
6623 { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
6624 { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
6625 { }
6626 },
6627 .chained = true,
6628 .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
6629 },
6590}; 6630};
6591 6631
6592static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6632static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7272,7 +7312,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7272 {0x12, 0x90a60130}, 7312 {0x12, 0x90a60130},
7273 {0x19, 0x03a11020}, 7313 {0x19, 0x03a11020},
7274 {0x21, 0x0321101f}), 7314 {0x21, 0x0321101f}),
7275 SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, 7315 SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
7276 {0x12, 0x90a60130}, 7316 {0x12, 0x90a60130},
7277 {0x14, 0x90170110}, 7317 {0x14, 0x90170110},
7278 {0x19, 0x04a11040}, 7318 {0x19, 0x04a11040},
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index d00734d31e04..e5b6769b9797 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
795 if (hcd->spdif) 795 if (hcd->spdif)
796 hcp->daidrv[i] = hdmi_spdif_dai; 796 hcp->daidrv[i] = hdmi_spdif_dai;
797 797
798 dev_set_drvdata(dev, hcp);
799
798 ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv, 800 ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv,
799 dai_count); 801 dai_count);
800 if (ret) { 802 if (ret) {
@@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
802 __func__, ret); 804 __func__, ret);
803 return ret; 805 return ret;
804 } 806 }
805
806 dev_set_drvdata(dev, hcp);
807 return 0; 807 return 0;
808} 808}
809 809
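
The hdmi-codec hunk moves dev_set_drvdata() ahead of devm_snd_soc_register_component(), presumably because registration can trigger component callbacks that already look the driver data up. A minimal sketch of why that ordering matters (the register/callback names here are invented):

#include <stdio.h>

struct device {
        void *drvdata;
};

static void set_drvdata(struct device *dev, void *data)
{
        dev->drvdata = data;
}

/* callback that may fire from inside register_component() */
static void on_registered(struct device *dev)
{
        /* expects drvdata to be valid already */
        printf("callback sees drvdata=%p\n", dev->drvdata);
}

static int register_component(struct device *dev)
{
        on_registered(dev);     /* may run before registration returns */
        return 0;
}

int main(void)
{
        struct device dev = { 0 };
        int priv = 42;

        set_drvdata(&dev, &priv);        /* set first ... */
        return register_component(&dev); /* ... so the callback finds it */
}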
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 89c43b26c379..a9b91bcfcc09 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -1778,7 +1778,9 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
1778 {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc}, 1778 {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
1779 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, 1779 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
1780 {"ADC STO1 ASRC", NULL, "AD ASRC"}, 1780 {"ADC STO1 ASRC", NULL, "AD ASRC"},
1781 {"ADC STO1 ASRC", NULL, "DA ASRC"},
1781 {"ADC STO1 ASRC", NULL, "CLKDET"}, 1782 {"ADC STO1 ASRC", NULL, "CLKDET"},
1783 {"DAC STO1 ASRC", NULL, "AD ASRC"},
1782 {"DAC STO1 ASRC", NULL, "DA ASRC"}, 1784 {"DAC STO1 ASRC", NULL, "DA ASRC"},
1783 {"DAC STO1 ASRC", NULL, "CLKDET"}, 1785 {"DAC STO1 ASRC", NULL, "CLKDET"},
1784 1786
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 37e001cf9cd1..3fe34417ec89 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -462,7 +462,7 @@ static int asoc_simple_card_parse_of(struct simple_card_data *priv)
462 conf_idx = 0; 462 conf_idx = 0;
463 node = of_get_child_by_name(top, PREFIX "dai-link"); 463 node = of_get_child_by_name(top, PREFIX "dai-link");
464 if (!node) { 464 if (!node) {
465 node = dev->of_node; 465 node = of_node_get(top);
466 loop = 0; 466 loop = 0;
467 } 467 }
468 468
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index d6c62aa13041..d4bde4834ce5 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -604,6 +604,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
604 unsigned int fmt) 604 unsigned int fmt)
605{ 605{
606 struct i2s_dai *i2s = to_info(dai); 606 struct i2s_dai *i2s = to_info(dai);
607 struct i2s_dai *other = get_other_dai(i2s);
607 int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave; 608 int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
608 u32 mod, tmp = 0; 609 u32 mod, tmp = 0;
609 unsigned long flags; 610 unsigned long flags;
@@ -661,7 +662,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
661 * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any 662 * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
662 * clock configuration assigned in DT is not overwritten. 663 * clock configuration assigned in DT is not overwritten.
663 */ 664 */
664 if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL) 665 if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL &&
666 other->clk_data.clks == NULL)
665 i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, 667 i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
666 0, SND_SOC_CLOCK_IN); 668 0, SND_SOC_CLOCK_IN);
667 break; 669 break;
@@ -699,7 +701,9 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
699 struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) 701 struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
700{ 702{
701 struct i2s_dai *i2s = to_info(dai); 703 struct i2s_dai *i2s = to_info(dai);
704 struct i2s_dai *other = get_other_dai(i2s);
702 u32 mod, mask = 0, val = 0; 705 u32 mod, mask = 0, val = 0;
706 struct clk *rclksrc;
703 unsigned long flags; 707 unsigned long flags;
704 708
705 WARN_ON(!pm_runtime_active(dai->dev)); 709 WARN_ON(!pm_runtime_active(dai->dev));
@@ -782,6 +786,13 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
782 786
783 i2s->frmclk = params_rate(params); 787 i2s->frmclk = params_rate(params);
784 788
789 rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
790 if (!rclksrc || IS_ERR(rclksrc))
791 rclksrc = other->clk_table[CLK_I2S_RCLK_SRC];
792
793 if (rclksrc && !IS_ERR(rclksrc))
794 i2s->rclk_srcrate = clk_get_rate(rclksrc);
795
785 return 0; 796 return 0;
786} 797}
787 798
@@ -886,11 +897,6 @@ static int config_setup(struct i2s_dai *i2s)
886 return 0; 897 return 0;
887 898
888 if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { 899 if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
889 struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
890
891 if (rclksrc && !IS_ERR(rclksrc))
892 i2s->rclk_srcrate = clk_get_rate(rclksrc);
893
894 psr = i2s->rclk_srcrate / i2s->frmclk / rfs; 900 psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
895 writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); 901 writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
896 dev_dbg(&i2s->pdev->dev, 902 dev_dbg(&i2s->pdev->dev,
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 59e250cc2e9d..e819e965e1db 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1526,14 +1526,14 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
1526 int ret; 1526 int ret;
1527 1527
1528 /* 1528 /*
1529 * 1) Avoid duplicate register (ex. MIXer case) 1529 * 1) Avoid duplicate register for DVC with MIX case
1530 * 2) re-register if card was rebinded 1530 * 2) Allow duplicate register for MIX
1531 * 3) re-register if card was rebinded
1531 */ 1532 */
1532 list_for_each_entry(kctrl, &card->controls, list) { 1533 list_for_each_entry(kctrl, &card->controls, list) {
1533 struct rsnd_kctrl_cfg *c = kctrl->private_data; 1534 struct rsnd_kctrl_cfg *c = kctrl->private_data;
1534 1535
1535 if (strcmp(kctrl->id.name, name) == 0 && 1536 if (c == cfg)
1536 c->mod == mod)
1537 return 0; 1537 return 0;
1538 } 1538 }
1539 1539
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 45ef295743ec..f5afab631abb 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -286,7 +286,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
286 if (rsnd_ssi_is_multi_slave(mod, io)) 286 if (rsnd_ssi_is_multi_slave(mod, io))
287 return 0; 287 return 0;
288 288
289 if (ssi->usrcnt > 1) { 289 if (ssi->usrcnt > 0) {
290 if (ssi->rate != rate) { 290 if (ssi->rate != rate) {
291 dev_err(dev, "SSI parent/child should use same rate\n"); 291 dev_err(dev, "SSI parent/child should use same rate\n");
292 return -EINVAL; 292 return -EINVAL;
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index c5934adcfd01..c74991dd18ab 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -79,7 +79,7 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
79 break; 79 break;
80 case 9: 80 case 9:
81 for (i = 0; i < 4; i++) 81 for (i = 0; i < 4; i++)
82 rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << (id * 4)); 82 rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << 4);
83 break; 83 break;
84 } 84 }
85 85
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index aae450ba4f08..50617db05c46 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -735,12 +735,17 @@ static struct snd_soc_component *soc_find_component(
735 const struct device_node *of_node, const char *name) 735 const struct device_node *of_node, const char *name)
736{ 736{
737 struct snd_soc_component *component; 737 struct snd_soc_component *component;
738 struct device_node *component_of_node;
738 739
739 lockdep_assert_held(&client_mutex); 740 lockdep_assert_held(&client_mutex);
740 741
741 for_each_component(component) { 742 for_each_component(component) {
742 if (of_node) { 743 if (of_node) {
743 if (component->dev->of_node == of_node) 744 component_of_node = component->dev->of_node;
745 if (!component_of_node && component->dev->parent)
746 component_of_node = component->dev->parent->of_node;
747
748 if (component_of_node == of_node)
744 return component; 749 return component;
745 } else if (name && strcmp(component->name, name) == 0) { 750 } else if (name && strcmp(component->name, name) == 0) {
746 return component; 751 return component;
@@ -951,7 +956,7 @@ static void soc_remove_dai(struct snd_soc_dai *dai, int order)
951{ 956{
952 int err; 957 int err;
953 958
954 if (!dai || !dai->probed || 959 if (!dai || !dai->probed || !dai->driver ||
955 dai->driver->remove_order != order) 960 dai->driver->remove_order != order)
956 return; 961 return;
957 962
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 2c4c13419539..20bad755888b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -70,12 +70,16 @@ static int dapm_up_seq[] = {
70 [snd_soc_dapm_clock_supply] = 1, 70 [snd_soc_dapm_clock_supply] = 1,
71 [snd_soc_dapm_supply] = 2, 71 [snd_soc_dapm_supply] = 2,
72 [snd_soc_dapm_micbias] = 3, 72 [snd_soc_dapm_micbias] = 3,
73 [snd_soc_dapm_vmid] = 3,
73 [snd_soc_dapm_dai_link] = 2, 74 [snd_soc_dapm_dai_link] = 2,
74 [snd_soc_dapm_dai_in] = 4, 75 [snd_soc_dapm_dai_in] = 4,
75 [snd_soc_dapm_dai_out] = 4, 76 [snd_soc_dapm_dai_out] = 4,
76 [snd_soc_dapm_aif_in] = 4, 77 [snd_soc_dapm_aif_in] = 4,
77 [snd_soc_dapm_aif_out] = 4, 78 [snd_soc_dapm_aif_out] = 4,
78 [snd_soc_dapm_mic] = 5, 79 [snd_soc_dapm_mic] = 5,
80 [snd_soc_dapm_siggen] = 5,
81 [snd_soc_dapm_input] = 5,
82 [snd_soc_dapm_output] = 5,
79 [snd_soc_dapm_mux] = 6, 83 [snd_soc_dapm_mux] = 6,
80 [snd_soc_dapm_demux] = 6, 84 [snd_soc_dapm_demux] = 6,
81 [snd_soc_dapm_dac] = 7, 85 [snd_soc_dapm_dac] = 7,
@@ -83,11 +87,19 @@ static int dapm_up_seq[] = {
83 [snd_soc_dapm_mixer] = 8, 87 [snd_soc_dapm_mixer] = 8,
84 [snd_soc_dapm_mixer_named_ctl] = 8, 88 [snd_soc_dapm_mixer_named_ctl] = 8,
85 [snd_soc_dapm_pga] = 9, 89 [snd_soc_dapm_pga] = 9,
90 [snd_soc_dapm_buffer] = 9,
91 [snd_soc_dapm_scheduler] = 9,
92 [snd_soc_dapm_effect] = 9,
93 [snd_soc_dapm_src] = 9,
94 [snd_soc_dapm_asrc] = 9,
95 [snd_soc_dapm_encoder] = 9,
96 [snd_soc_dapm_decoder] = 9,
86 [snd_soc_dapm_adc] = 10, 97 [snd_soc_dapm_adc] = 10,
87 [snd_soc_dapm_out_drv] = 11, 98 [snd_soc_dapm_out_drv] = 11,
88 [snd_soc_dapm_hp] = 11, 99 [snd_soc_dapm_hp] = 11,
89 [snd_soc_dapm_spk] = 11, 100 [snd_soc_dapm_spk] = 11,
90 [snd_soc_dapm_line] = 11, 101 [snd_soc_dapm_line] = 11,
102 [snd_soc_dapm_sink] = 11,
91 [snd_soc_dapm_kcontrol] = 12, 103 [snd_soc_dapm_kcontrol] = 12,
92 [snd_soc_dapm_post] = 13, 104 [snd_soc_dapm_post] = 13,
93}; 105};
@@ -100,13 +112,25 @@ static int dapm_down_seq[] = {
100 [snd_soc_dapm_spk] = 3, 112 [snd_soc_dapm_spk] = 3,
101 [snd_soc_dapm_line] = 3, 113 [snd_soc_dapm_line] = 3,
102 [snd_soc_dapm_out_drv] = 3, 114 [snd_soc_dapm_out_drv] = 3,
115 [snd_soc_dapm_sink] = 3,
103 [snd_soc_dapm_pga] = 4, 116 [snd_soc_dapm_pga] = 4,
117 [snd_soc_dapm_buffer] = 4,
118 [snd_soc_dapm_scheduler] = 4,
119 [snd_soc_dapm_effect] = 4,
120 [snd_soc_dapm_src] = 4,
121 [snd_soc_dapm_asrc] = 4,
122 [snd_soc_dapm_encoder] = 4,
123 [snd_soc_dapm_decoder] = 4,
104 [snd_soc_dapm_switch] = 5, 124 [snd_soc_dapm_switch] = 5,
105 [snd_soc_dapm_mixer_named_ctl] = 5, 125 [snd_soc_dapm_mixer_named_ctl] = 5,
106 [snd_soc_dapm_mixer] = 5, 126 [snd_soc_dapm_mixer] = 5,
107 [snd_soc_dapm_dac] = 6, 127 [snd_soc_dapm_dac] = 6,
108 [snd_soc_dapm_mic] = 7, 128 [snd_soc_dapm_mic] = 7,
129 [snd_soc_dapm_siggen] = 7,
130 [snd_soc_dapm_input] = 7,
131 [snd_soc_dapm_output] = 7,
109 [snd_soc_dapm_micbias] = 8, 132 [snd_soc_dapm_micbias] = 8,
133 [snd_soc_dapm_vmid] = 8,
110 [snd_soc_dapm_mux] = 9, 134 [snd_soc_dapm_mux] = 9,
111 [snd_soc_dapm_demux] = 9, 135 [snd_soc_dapm_demux] = 9,
112 [snd_soc_dapm_aif_in] = 10, 136 [snd_soc_dapm_aif_in] = 10,
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 045ef136903d..731b963b6995 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -502,6 +502,7 @@ static void remove_dai(struct snd_soc_component *comp,
502{ 502{
503 struct snd_soc_dai_driver *dai_drv = 503 struct snd_soc_dai_driver *dai_drv =
504 container_of(dobj, struct snd_soc_dai_driver, dobj); 504 container_of(dobj, struct snd_soc_dai_driver, dobj);
505 struct snd_soc_dai *dai;
505 506
506 if (pass != SOC_TPLG_PASS_PCM_DAI) 507 if (pass != SOC_TPLG_PASS_PCM_DAI)
507 return; 508 return;
@@ -509,6 +510,10 @@ static void remove_dai(struct snd_soc_component *comp,
509 if (dobj->ops && dobj->ops->dai_unload) 510 if (dobj->ops && dobj->ops->dai_unload)
510 dobj->ops->dai_unload(comp, dobj); 511 dobj->ops->dai_unload(comp, dobj);
511 512
513 list_for_each_entry(dai, &comp->dai_list, list)
514 if (dai->driver == dai_drv)
515 dai->driver = NULL;
516
512 kfree(dai_drv->name); 517 kfree(dai_drv->name);
513 list_del(&dobj->list); 518 list_del(&dobj->list);
514 kfree(dai_drv); 519 kfree(dai_drv);
@@ -2482,6 +2487,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
2482 struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id) 2487 struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id)
2483{ 2488{
2484 struct soc_tplg tplg; 2489 struct soc_tplg tplg;
2490 int ret;
2485 2491
2486 /* setup parsing context */ 2492 /* setup parsing context */
2487 memset(&tplg, 0, sizeof(tplg)); 2493 memset(&tplg, 0, sizeof(tplg));
@@ -2495,7 +2501,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
2495 tplg.bytes_ext_ops = ops->bytes_ext_ops; 2501 tplg.bytes_ext_ops = ops->bytes_ext_ops;
2496 tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count; 2502 tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
2497 2503
2498 return soc_tplg_load(&tplg); 2504 ret = soc_tplg_load(&tplg);
2505 /* free the created components if fail to load topology */
2506 if (ret)
2507 snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL);
2508
2509 return ret;
2499} 2510}
2500EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); 2511EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
2501 2512
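
snd_soc_tplg_component_load() now tears down whatever the topology pass managed to create before the failure, instead of leaving half-registered objects behind. A compact sketch of that load-or-clean-up-everything pattern (list and helper names invented for illustration):

#include <stdlib.h>

struct item {
        struct item *next;
};

struct comp {
        struct item *objects;   /* everything created by the load */
};

static void remove_all(struct comp *c)
{
        while (c->objects) {
                struct item *next = c->objects->next;

                free(c->objects);
                c->objects = next;
        }
}

static int create_one(struct comp *c, int fail)
{
        struct item *i;

        if (fail || !(i = calloc(1, sizeof(*i))))
                return -1;
        i->next = c->objects;
        c->objects = i;
        return 0;
}

static int load(struct comp *c)
{
        int ret = 0, n;

        for (n = 0; n < 4 && !ret; n++)
                ret = create_one(c, n == 3);    /* simulate a late failure */

        if (ret)
                remove_all(c);  /* free the partially created objects */
        return ret;
}

int main(void)
{
        struct comp c = { 0 };

        load(&c);               /* fails part-way, but leaks nothing */
        return 0;
}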
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 382847154227..db114f3977e0 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -314,6 +314,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
314 return 0; 314 return 0;
315} 315}
316 316
317/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
318 * applies. Returns 1 if a quirk was found.
319 */
317static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, 320static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
318 struct usb_device *dev, 321 struct usb_device *dev,
319 struct usb_interface_descriptor *altsd, 322 struct usb_interface_descriptor *altsd,
@@ -384,7 +387,7 @@ add_sync_ep:
384 387
385 subs->data_endpoint->sync_master = subs->sync_endpoint; 388 subs->data_endpoint->sync_master = subs->sync_endpoint;
386 389
387 return 0; 390 return 1;
388} 391}
389 392
390static int set_sync_endpoint(struct snd_usb_substream *subs, 393static int set_sync_endpoint(struct snd_usb_substream *subs,
@@ -423,6 +426,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
423 if (err < 0) 426 if (err < 0)
424 return err; 427 return err;
425 428
429 /* endpoint set by quirk */
430 if (err > 0)
431 return 0;
432
426 if (altsd->bNumEndpoints < 2) 433 if (altsd->bNumEndpoints < 2)
427 return 0; 434 return 0;
428 435
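
set_sync_ep_implicit_fb_quirk() now distinguishes three outcomes: a negative error, 0 when no quirk matched, and 1 when the sync endpoint was already set up, and set_sync_endpoint() short-circuits on the last case. A small sketch of consuming such a tri-state return (the helper below is hypothetical):

#include <stdio.h>

/* hypothetical helper using the same convention:
 * <0 error, 0 not handled, >0 handled by a quirk */
static int try_quirk(unsigned int id)
{
        if (id == 0)
                return -1;              /* invalid device */
        if (id == 0x0763)
                return 1;               /* quirk applied, endpoint set */
        return 0;                       /* no quirk, keep probing */
}

static int setup_sync_endpoint(unsigned int id)
{
        int err = try_quirk(id);

        if (err < 0)
                return err;             /* propagate the error */
        if (err > 0)
                return 0;               /* endpoint set by quirk, done */

        printf("generic probing for device %#x\n", id);
        return 0;
}

int main(void)
{
        setup_sync_endpoint(0x0763);    /* handled by the quirk */
        setup_sync_endpoint(0x1234);    /* falls through to probing */
        return 0;
}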
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index fd92ce8388fc..57aaeaf8e192 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -15,6 +15,8 @@
15#include "../../arch/ia64/include/uapi/asm/bitsperlong.h" 15#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
16#elif defined(__riscv) 16#elif defined(__riscv)
17#include "../../arch/riscv/include/uapi/asm/bitsperlong.h" 17#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
18#elif defined(__alpha__)
19#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
18#else 20#else
19#include <asm-generic/bitsperlong.h> 21#include <asm-generic/bitsperlong.h>
20#endif 22#endif
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index 147e34cfceb7..02d7c871862a 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -474,6 +474,16 @@ static void test_lpm_delete(void)
474 assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 && 474 assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
475 errno == ENOENT); 475 errno == ENOENT);
476 476
477 key->prefixlen = 30; // unused prefix so far
478 inet_pton(AF_INET, "192.255.0.0", key->data);
479 assert(bpf_map_delete_elem(map_fd, key) == -1 &&
480 errno == ENOENT);
481
482 key->prefixlen = 16; // same prefix as the root node
483 inet_pton(AF_INET, "192.255.0.0", key->data);
484 assert(bpf_map_delete_elem(map_fd, key) == -1 &&
485 errno == ENOENT);
486
477 /* assert initial lookup */ 487 /* assert initial lookup */
478 key->prefixlen = 32; 488 key->prefixlen = 32;
479 inet_pton(AF_INET, "192.168.0.1", key->data); 489 inet_pton(AF_INET, "192.168.0.1", key->data);
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 802b4af18729..1080ff55a788 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -388,6 +388,7 @@ fib_carrier_unicast_test()
388 388
389 set -e 389 set -e
390 $IP link set dev dummy0 carrier off 390 $IP link set dev dummy0 carrier off
391 sleep 1
391 set +e 392 set +e
392 393
393 echo " Carrier down" 394 echo " Carrier down"
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index 9050eeea5f5f..1de8bd8ccf5d 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -9,6 +9,3 @@ all: $(TEST_PROGS)
9top_srcdir = ../../../../.. 9top_srcdir = ../../../../..
10KSFT_KHDR_INSTALL := 1 10KSFT_KHDR_INSTALL := 1
11include ../../lib.mk 11include ../../lib.mk
12
13clean:
14 rm -fr $(TEST_GEN_FILES)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9e350fd34504..9c486fad3f9f 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
626 /* Awaken to handle a signal, request we sleep again later. */ 626 /* Awaken to handle a signal, request we sleep again later. */
627 kvm_make_request(KVM_REQ_SLEEP, vcpu); 627 kvm_make_request(KVM_REQ_SLEEP, vcpu);
628 } 628 }
629
630 /*
631 * Make sure we will observe a potential reset request if we've
632 * observed a change to the power state. Pairs with the smp_wmb() in
633 * kvm_psci_vcpu_on().
634 */
635 smp_rmb();
629} 636}
630 637
631static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) 638static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
639 if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) 646 if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
640 vcpu_req_sleep(vcpu); 647 vcpu_req_sleep(vcpu);
641 648
649 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
650 kvm_reset_vcpu(vcpu);
651
642 /* 652 /*
643 * Clear IRQ_PENDING requests that were made to guarantee 653 * Clear IRQ_PENDING requests that were made to guarantee
644 * that a VCPU sees new virtual interrupts. 654 * that a VCPU sees new virtual interrupts.
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..30251e288629 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1695 1695
1696 vma_pagesize = vma_kernel_pagesize(vma); 1696 vma_pagesize = vma_kernel_pagesize(vma);
1697 /* 1697 /*
1698 * PUD level may not exist for a VM but PMD is guaranteed to 1698 * The stage2 has a minimum of 2 level table (For arm64 see
1699 * exist. 1699 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1700 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
1701 * As for PUD huge maps, we must make sure that we have at least
1702 * 3 levels, i.e, PMD is not folded.
1700 */ 1703 */
1701 if ((vma_pagesize == PMD_SIZE || 1704 if ((vma_pagesize == PMD_SIZE ||
1702 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) && 1705 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
1703 !force_pte) { 1706 !force_pte) {
1704 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; 1707 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1705 } 1708 }
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 9b73d3ad918a..34d08ee63747 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
104 104
105static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) 105static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
106{ 106{
107 struct vcpu_reset_state *reset_state;
107 struct kvm *kvm = source_vcpu->kvm; 108 struct kvm *kvm = source_vcpu->kvm;
108 struct kvm_vcpu *vcpu = NULL; 109 struct kvm_vcpu *vcpu = NULL;
109 struct swait_queue_head *wq;
110 unsigned long cpu_id; 110 unsigned long cpu_id;
111 unsigned long context_id;
112 phys_addr_t target_pc;
113 111
114 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; 112 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
115 if (vcpu_mode_is_32bit(source_vcpu)) 113 if (vcpu_mode_is_32bit(source_vcpu))
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
130 return PSCI_RET_INVALID_PARAMS; 128 return PSCI_RET_INVALID_PARAMS;
131 } 129 }
132 130
133 target_pc = smccc_get_arg2(source_vcpu); 131 reset_state = &vcpu->arch.reset_state;
134 context_id = smccc_get_arg3(source_vcpu);
135 132
136 kvm_reset_vcpu(vcpu); 133 reset_state->pc = smccc_get_arg2(source_vcpu);
137
138 /* Gracefully handle Thumb2 entry point */
139 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
140 target_pc &= ~((phys_addr_t) 1);
141 vcpu_set_thumb(vcpu);
142 }
143 134
144 /* Propagate caller endianness */ 135 /* Propagate caller endianness */
145 if (kvm_vcpu_is_be(source_vcpu)) 136 reset_state->be = kvm_vcpu_is_be(source_vcpu);
146 kvm_vcpu_set_be(vcpu);
147 137
148 *vcpu_pc(vcpu) = target_pc;
149 /* 138 /*
150 * NOTE: We always update r0 (or x0) because for PSCI v0.1 139 * NOTE: We always update r0 (or x0) because for PSCI v0.1
151 * the general puspose registers are undefined upon CPU_ON. 140 * the general puspose registers are undefined upon CPU_ON.
152 */ 141 */
153 smccc_set_retval(vcpu, context_id, 0, 0, 0); 142 reset_state->r0 = smccc_get_arg3(source_vcpu);
154 vcpu->arch.power_off = false; 143
155 smp_mb(); /* Make sure the above is visible */ 144 WRITE_ONCE(reset_state->reset, true);
145 kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
156 146
157 wq = kvm_arch_vcpu_wq(vcpu); 147 /*
158 swake_up_one(wq); 148 * Make sure the reset request is observed if the change to
149 * power_state is observed.
150 */
151 smp_wmb();
152
153 vcpu->arch.power_off = false;
154 kvm_vcpu_wake_up(vcpu);
159 155
160 return PSCI_RET_SUCCESS; 156 return PSCI_RET_SUCCESS;
161} 157}
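
The psci.c hunk records the requested PC, r0 and endianness in vcpu->arch.reset_state, sets the reset flag, and only then clears power_off behind an smp_wmb(); the matching smp_rmb() added to vcpu_req_sleep() in arm.c guarantees that a vCPU which observes the new power state also observes the pending reset request. A C11 analogue of that barrier pairing, with relaxed atomics standing in for WRITE_ONCE/READ_ONCE and fences for smp_wmb()/smp_rmb() (field names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct vcpu_state {
        _Atomic bool reset_requested;   /* written before the barrier */
        _Atomic bool powered_off;       /* flag the sleeping vCPU checks */
};

/* CPU_ON path: publish the reset request, then flip the power state */
static void power_on(struct vcpu_state *v)
{
        atomic_store_explicit(&v->reset_requested, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
        atomic_store_explicit(&v->powered_off, false, memory_order_relaxed);
}

/* woken vCPU: observing powered_off == false implies the reset
 * request is visible as well, thanks to the paired barriers */
static bool pending_reset(struct vcpu_state *v)
{
        if (atomic_load_explicit(&v->powered_off, memory_order_relaxed))
                return false;
        atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
        return atomic_load_explicit(&v->reset_requested, memory_order_relaxed);
}

int main(void)
{
        struct vcpu_state v = { .reset_requested = false, .powered_off = true };

        power_on(&v);
        printf("reset pending: %d\n", pending_reset(&v));
        return 0;
}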
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 07aa900bac56..1f62f2b8065d 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
251 return 0; 251 return 0;
252 } 252 }
253 253
254 spin_lock_irqsave(&irq->irq_lock, flags); 254 raw_spin_lock_irqsave(&irq->irq_lock, flags);
255 print_irq_state(s, irq, vcpu); 255 print_irq_state(s, irq, vcpu);
256 spin_unlock_irqrestore(&irq->irq_lock, flags); 256 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
257 257
258 vgic_put_irq(kvm, irq); 258 vgic_put_irq(kvm, irq);
259 return 0; 259 return 0;
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index c0c0b88af1d5..3bdb31eaed64 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
64 struct vgic_dist *dist = &kvm->arch.vgic; 64 struct vgic_dist *dist = &kvm->arch.vgic;
65 65
66 INIT_LIST_HEAD(&dist->lpi_list_head); 66 INIT_LIST_HEAD(&dist->lpi_list_head);
67 spin_lock_init(&dist->lpi_list_lock); 67 raw_spin_lock_init(&dist->lpi_list_lock);
68} 68}
69 69
70/* CREATION */ 70/* CREATION */
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
171 171
172 irq->intid = i + VGIC_NR_PRIVATE_IRQS; 172 irq->intid = i + VGIC_NR_PRIVATE_IRQS;
173 INIT_LIST_HEAD(&irq->ap_list); 173 INIT_LIST_HEAD(&irq->ap_list);
174 spin_lock_init(&irq->irq_lock); 174 raw_spin_lock_init(&irq->irq_lock);
175 irq->vcpu = NULL; 175 irq->vcpu = NULL;
176 irq->target_vcpu = vcpu0; 176 irq->target_vcpu = vcpu0;
177 kref_init(&irq->refcount); 177 kref_init(&irq->refcount);
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
206 vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; 206 vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
207 207
208 INIT_LIST_HEAD(&vgic_cpu->ap_list_head); 208 INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
209 spin_lock_init(&vgic_cpu->ap_list_lock); 209 raw_spin_lock_init(&vgic_cpu->ap_list_lock);
210 210
211 /* 211 /*
212 * Enable and configure all SGIs to be edge-triggered and 212 * Enable and configure all SGIs to be edge-triggered and
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
216 struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; 216 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
217 217
218 INIT_LIST_HEAD(&irq->ap_list); 218 INIT_LIST_HEAD(&irq->ap_list);
219 spin_lock_init(&irq->irq_lock); 219 raw_spin_lock_init(&irq->irq_lock);
220 irq->intid = i; 220 irq->intid = i;
221 irq->vcpu = NULL; 221 irq->vcpu = NULL;
222 irq->target_vcpu = vcpu; 222 irq->target_vcpu = vcpu;
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
231 irq->config = VGIC_CONFIG_LEVEL; 231 irq->config = VGIC_CONFIG_LEVEL;
232 } 232 }
233 233
234 /*
235 * GICv3 can only be created via the KVM_DEVICE_CREATE API and
236 * so we always know the emulation type at this point as it's
237 * either explicitly configured as GICv3, or explicitly
238 * configured as GICv2, or not configured yet which also
239 * implies GICv2.
240 */
241 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) 234 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
242 irq->group = 1; 235 irq->group = 1;
243 else 236 else
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
281{ 274{
282 struct vgic_dist *dist = &kvm->arch.vgic; 275 struct vgic_dist *dist = &kvm->arch.vgic;
283 struct kvm_vcpu *vcpu; 276 struct kvm_vcpu *vcpu;
284 int ret = 0, i; 277 int ret = 0, i, idx;
285 278
286 if (vgic_initialized(kvm)) 279 if (vgic_initialized(kvm))
287 return 0; 280 return 0;
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
298 if (ret) 291 if (ret)
299 goto out; 292 goto out;
300 293
294 /* Initialize groups on CPUs created before the VGIC type was known */
295 kvm_for_each_vcpu(idx, vcpu, kvm) {
296 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
297
298 for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
299 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
300 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
301 irq->group = 1;
302 else
303 irq->group = 0;
304 }
305 }
306
301 if (vgic_has_its(kvm)) { 307 if (vgic_has_its(kvm)) {
302 ret = vgic_v4_init(kvm); 308 ret = vgic_v4_init(kvm);
303 if (ret) 309 if (ret)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index eb2a390a6c86..ab3f47745d9c 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
65 65
66 INIT_LIST_HEAD(&irq->lpi_list); 66 INIT_LIST_HEAD(&irq->lpi_list);
67 INIT_LIST_HEAD(&irq->ap_list); 67 INIT_LIST_HEAD(&irq->ap_list);
68 spin_lock_init(&irq->irq_lock); 68 raw_spin_lock_init(&irq->irq_lock);
69 69
70 irq->config = VGIC_CONFIG_EDGE; 70 irq->config = VGIC_CONFIG_EDGE;
71 kref_init(&irq->refcount); 71 kref_init(&irq->refcount);
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
73 irq->target_vcpu = vcpu; 73 irq->target_vcpu = vcpu;
74 irq->group = 1; 74 irq->group = 1;
75 75
76 spin_lock_irqsave(&dist->lpi_list_lock, flags); 76 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
77 77
78 /* 78 /*
79 * There could be a race with another vgic_add_lpi(), so we need to 79 * There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
101 dist->lpi_list_count++; 101 dist->lpi_list_count++;
102 102
103out_unlock: 103out_unlock:
104 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 104 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
105 105
106 /* 106 /*
107 * We "cache" the configuration table entries in our struct vgic_irq's. 107 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
287 if (ret) 287 if (ret)
288 return ret; 288 return ret;
289 289
290 spin_lock_irqsave(&irq->irq_lock, flags); 290 raw_spin_lock_irqsave(&irq->irq_lock, flags);
291 291
292 if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { 292 if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
293 irq->priority = LPI_PROP_PRIORITY(prop); 293 irq->priority = LPI_PROP_PRIORITY(prop);
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
299 } 299 }
300 } 300 }
301 301
302 spin_unlock_irqrestore(&irq->irq_lock, flags); 302 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
303 303
304 if (irq->hw) 304 if (irq->hw)
305 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); 305 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
332 if (!intids) 332 if (!intids)
333 return -ENOMEM; 333 return -ENOMEM;
334 334
335 spin_lock_irqsave(&dist->lpi_list_lock, flags); 335 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
337 if (i == irq_count) 337 if (i == irq_count)
338 break; 338 break;
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
341 continue; 341 continue;
342 intids[i++] = irq->intid; 342 intids[i++] = irq->intid;
343 } 343 }
344 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 344 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
345 345
346 *intid_ptr = intids; 346 *intid_ptr = intids;
347 return i; 347 return i;
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
352 int ret = 0; 352 int ret = 0;
353 unsigned long flags; 353 unsigned long flags;
354 354
355 spin_lock_irqsave(&irq->irq_lock, flags); 355 raw_spin_lock_irqsave(&irq->irq_lock, flags);
356 irq->target_vcpu = vcpu; 356 irq->target_vcpu = vcpu;
357 spin_unlock_irqrestore(&irq->irq_lock, flags); 357 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
358 358
359 if (irq->hw) { 359 if (irq->hw) {
360 struct its_vlpi_map map; 360 struct its_vlpi_map map;
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
455 } 455 }
456 456
457 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); 457 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
458 spin_lock_irqsave(&irq->irq_lock, flags); 458 raw_spin_lock_irqsave(&irq->irq_lock, flags);
459 irq->pending_latch = pendmask & (1U << bit_nr); 459 irq->pending_latch = pendmask & (1U << bit_nr);
460 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 460 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
461 vgic_put_irq(vcpu->kvm, irq); 461 vgic_put_irq(vcpu->kvm, irq);
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
612 return irq_set_irqchip_state(irq->host_irq, 612 return irq_set_irqchip_state(irq->host_irq,
613 IRQCHIP_STATE_PENDING, true); 613 IRQCHIP_STATE_PENDING, true);
614 614
615 spin_lock_irqsave(&irq->irq_lock, flags); 615 raw_spin_lock_irqsave(&irq->irq_lock, flags);
616 irq->pending_latch = true; 616 irq->pending_latch = true;
617 vgic_queue_irq_unlock(kvm, irq, flags); 617 vgic_queue_irq_unlock(kvm, irq, flags);
618 618
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 738b65d2d0e7..b535fffc7400 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
147 147
148 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); 148 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
149 149
150 spin_lock_irqsave(&irq->irq_lock, flags); 150 raw_spin_lock_irqsave(&irq->irq_lock, flags);
151 irq->pending_latch = true; 151 irq->pending_latch = true;
152 irq->source |= 1U << source_vcpu->vcpu_id; 152 irq->source |= 1U << source_vcpu->vcpu_id;
153 153
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
191 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); 191 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
192 int target; 192 int target;
193 193
194 spin_lock_irqsave(&irq->irq_lock, flags); 194 raw_spin_lock_irqsave(&irq->irq_lock, flags);
195 195
196 irq->targets = (val >> (i * 8)) & cpu_mask; 196 irq->targets = (val >> (i * 8)) & cpu_mask;
197 target = irq->targets ? __ffs(irq->targets) : 0; 197 target = irq->targets ? __ffs(irq->targets) : 0;
198 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); 198 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
199 199
200 spin_unlock_irqrestore(&irq->irq_lock, flags); 200 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
201 vgic_put_irq(vcpu->kvm, irq); 201 vgic_put_irq(vcpu->kvm, irq);
202 } 202 }
203} 203}
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
230 for (i = 0; i < len; i++) { 230 for (i = 0; i < len; i++) {
231 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 231 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
232 232
233 spin_lock_irqsave(&irq->irq_lock, flags); 233 raw_spin_lock_irqsave(&irq->irq_lock, flags);
234 234
235 irq->source &= ~((val >> (i * 8)) & 0xff); 235 irq->source &= ~((val >> (i * 8)) & 0xff);
236 if (!irq->source) 236 if (!irq->source)
237 irq->pending_latch = false; 237 irq->pending_latch = false;
238 238
239 spin_unlock_irqrestore(&irq->irq_lock, flags); 239 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
240 vgic_put_irq(vcpu->kvm, irq); 240 vgic_put_irq(vcpu->kvm, irq);
241 } 241 }
242} 242}
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
252 for (i = 0; i < len; i++) { 252 for (i = 0; i < len; i++) {
253 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 253 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
254 254
255 spin_lock_irqsave(&irq->irq_lock, flags); 255 raw_spin_lock_irqsave(&irq->irq_lock, flags);
256 256
257 irq->source |= (val >> (i * 8)) & 0xff; 257 irq->source |= (val >> (i * 8)) & 0xff;
258 258
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
260 irq->pending_latch = true; 260 irq->pending_latch = true;
261 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 261 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
262 } else { 262 } else {
263 spin_unlock_irqrestore(&irq->irq_lock, flags); 263 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
264 } 264 }
265 vgic_put_irq(vcpu->kvm, irq); 265 vgic_put_irq(vcpu->kvm, irq);
266 } 266 }
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index b3d1f0985117..4a12322bf7df 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
169 if (!irq) 169 if (!irq)
170 return; 170 return;
171 171
172 spin_lock_irqsave(&irq->irq_lock, flags); 172 raw_spin_lock_irqsave(&irq->irq_lock, flags);
173 173
174 /* We only care about and preserve Aff0, Aff1 and Aff2. */ 174 /* We only care about and preserve Aff0, Aff1 and Aff2. */
175 irq->mpidr = val & GENMASK(23, 0); 175 irq->mpidr = val & GENMASK(23, 0);
176 irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); 176 irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
177 177
178 spin_unlock_irqrestore(&irq->irq_lock, flags); 178 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
179 vgic_put_irq(vcpu->kvm, irq); 179 vgic_put_irq(vcpu->kvm, irq);
180} 180}
181 181
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
281 for (i = 0; i < len * 8; i++) { 281 for (i = 0; i < len * 8; i++) {
282 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 282 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
283 283
284 spin_lock_irqsave(&irq->irq_lock, flags); 284 raw_spin_lock_irqsave(&irq->irq_lock, flags);
285 if (test_bit(i, &val)) { 285 if (test_bit(i, &val)) {
286 /* 286 /*
287 * pending_latch is set irrespective of irq type 287 * pending_latch is set irrespective of irq type
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
292 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 292 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
293 } else { 293 } else {
294 irq->pending_latch = false; 294 irq->pending_latch = false;
295 spin_unlock_irqrestore(&irq->irq_lock, flags); 295 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
296 } 296 }
297 297
298 vgic_put_irq(vcpu->kvm, irq); 298 vgic_put_irq(vcpu->kvm, irq);
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
957 957
958 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); 958 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
959 959
960 spin_lock_irqsave(&irq->irq_lock, flags); 960 raw_spin_lock_irqsave(&irq->irq_lock, flags);
961 961
962 /* 962 /*
963 * An access targetting Group0 SGIs can only generate 963 * An access targetting Group0 SGIs can only generate
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
968 irq->pending_latch = true; 968 irq->pending_latch = true;
969 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 969 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
970 } else { 970 } else {
971 spin_unlock_irqrestore(&irq->irq_lock, flags); 971 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
972 } 972 }
973 973
974 vgic_put_irq(vcpu->kvm, irq); 974 vgic_put_irq(vcpu->kvm, irq);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ceeda7e04a4d..7de42fba05b5 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
77 for (i = 0; i < len * 8; i++) { 77 for (i = 0; i < len * 8; i++) {
78 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 78 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
79 79
80 spin_lock_irqsave(&irq->irq_lock, flags); 80 raw_spin_lock_irqsave(&irq->irq_lock, flags);
81 irq->group = !!(val & BIT(i)); 81 irq->group = !!(val & BIT(i));
82 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 82 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
83 83
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
120 for_each_set_bit(i, &val, len * 8) { 120 for_each_set_bit(i, &val, len * 8) {
121 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 121 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
122 122
123 spin_lock_irqsave(&irq->irq_lock, flags); 123 raw_spin_lock_irqsave(&irq->irq_lock, flags);
124 irq->enabled = true; 124 irq->enabled = true;
125 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 125 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
126 126
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
139 for_each_set_bit(i, &val, len * 8) { 139 for_each_set_bit(i, &val, len * 8) {
140 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 140 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
141 141
142 spin_lock_irqsave(&irq->irq_lock, flags); 142 raw_spin_lock_irqsave(&irq->irq_lock, flags);
143 143
144 irq->enabled = false; 144 irq->enabled = false;
145 145
146 spin_unlock_irqrestore(&irq->irq_lock, flags); 146 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
147 vgic_put_irq(vcpu->kvm, irq); 147 vgic_put_irq(vcpu->kvm, irq);
148 } 148 }
149} 149}
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
160 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 160 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
161 unsigned long flags; 161 unsigned long flags;
162 162
163 spin_lock_irqsave(&irq->irq_lock, flags); 163 raw_spin_lock_irqsave(&irq->irq_lock, flags);
164 if (irq_is_pending(irq)) 164 if (irq_is_pending(irq))
165 value |= (1U << i); 165 value |= (1U << i);
166 spin_unlock_irqrestore(&irq->irq_lock, flags); 166 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
167 167
168 vgic_put_irq(vcpu->kvm, irq); 168 vgic_put_irq(vcpu->kvm, irq);
169 } 169 }
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
215 for_each_set_bit(i, &val, len * 8) { 215 for_each_set_bit(i, &val, len * 8) {
216 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 216 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
217 217
218 spin_lock_irqsave(&irq->irq_lock, flags); 218 raw_spin_lock_irqsave(&irq->irq_lock, flags);
219 if (irq->hw) 219 if (irq->hw)
220 vgic_hw_irq_spending(vcpu, irq, is_uaccess); 220 vgic_hw_irq_spending(vcpu, irq, is_uaccess);
221 else 221 else
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
262 for_each_set_bit(i, &val, len * 8) { 262 for_each_set_bit(i, &val, len * 8) {
263 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 263 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
264 264
265 spin_lock_irqsave(&irq->irq_lock, flags); 265 raw_spin_lock_irqsave(&irq->irq_lock, flags);
266 266
267 if (irq->hw) 267 if (irq->hw)
268 vgic_hw_irq_cpending(vcpu, irq, is_uaccess); 268 vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
269 else 269 else
270 irq->pending_latch = false; 270 irq->pending_latch = false;
271 271
272 spin_unlock_irqrestore(&irq->irq_lock, flags); 272 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
273 vgic_put_irq(vcpu->kvm, irq); 273 vgic_put_irq(vcpu->kvm, irq);
274 } 274 }
275} 275}
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
311 unsigned long flags; 311 unsigned long flags;
312 struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); 312 struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
313 313
314 spin_lock_irqsave(&irq->irq_lock, flags); 314 raw_spin_lock_irqsave(&irq->irq_lock, flags);
315 315
316 if (irq->hw) { 316 if (irq->hw) {
317 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); 317 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
342 if (irq->active) 342 if (irq->active)
343 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 343 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
344 else 344 else
345 spin_unlock_irqrestore(&irq->irq_lock, flags); 345 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
346} 346}
347 347
348/* 348/*
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
485 for (i = 0; i < len; i++) { 485 for (i = 0; i < len; i++) {
486 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 486 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
487 487
488 spin_lock_irqsave(&irq->irq_lock, flags); 488 raw_spin_lock_irqsave(&irq->irq_lock, flags);
489 /* Narrow the priority range to what we actually support */ 489 /* Narrow the priority range to what we actually support */
490 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); 490 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
491 spin_unlock_irqrestore(&irq->irq_lock, flags); 491 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
492 492
493 vgic_put_irq(vcpu->kvm, irq); 493 vgic_put_irq(vcpu->kvm, irq);
494 } 494 }
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
534 continue; 534 continue;
535 535
536 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 536 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
537 spin_lock_irqsave(&irq->irq_lock, flags); 537 raw_spin_lock_irqsave(&irq->irq_lock, flags);
538 538
539 if (test_bit(i * 2 + 1, &val)) 539 if (test_bit(i * 2 + 1, &val))
540 irq->config = VGIC_CONFIG_EDGE; 540 irq->config = VGIC_CONFIG_EDGE;
541 else 541 else
542 irq->config = VGIC_CONFIG_LEVEL; 542 irq->config = VGIC_CONFIG_LEVEL;
543 543
544 spin_unlock_irqrestore(&irq->irq_lock, flags); 544 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
545 vgic_put_irq(vcpu->kvm, irq); 545 vgic_put_irq(vcpu->kvm, irq);
546 } 546 }
547} 547}
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
590 * restore irq config before line level. 590 * restore irq config before line level.
591 */ 591 */
592 new_level = !!(val & (1U << i)); 592 new_level = !!(val & (1U << i));
593 spin_lock_irqsave(&irq->irq_lock, flags); 593 raw_spin_lock_irqsave(&irq->irq_lock, flags);
594 irq->line_level = new_level; 594 irq->line_level = new_level;
595 if (new_level) 595 if (new_level)
596 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 596 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
597 else 597 else
598 spin_unlock_irqrestore(&irq->irq_lock, flags); 598 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
599 599
600 vgic_put_irq(vcpu->kvm, irq); 600 vgic_put_irq(vcpu->kvm, irq);
601 } 601 }
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892abd7dc..d91a8938aa7c 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
84 84
85 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 85 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
86 86
87 spin_lock(&irq->irq_lock); 87 raw_spin_lock(&irq->irq_lock);
88 88
89 /* Always preserve the active bit */ 89 /* Always preserve the active bit */
90 irq->active = !!(val & GICH_LR_ACTIVE_BIT); 90 irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
127 vgic_irq_set_phys_active(irq, false); 127 vgic_irq_set_phys_active(irq, false);
128 } 128 }
129 129
130 spin_unlock(&irq->irq_lock); 130 raw_spin_unlock(&irq->irq_lock);
131 vgic_put_irq(vcpu->kvm, irq); 131 vgic_put_irq(vcpu->kvm, irq);
132 } 132 }
133 133
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9c0dd234ebe8..4ee0aeb9a905 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
76 if (!irq) /* An LPI could have been unmapped. */ 76 if (!irq) /* An LPI could have been unmapped. */
77 continue; 77 continue;
78 78
79 spin_lock(&irq->irq_lock); 79 raw_spin_lock(&irq->irq_lock);
80 80
81 /* Always preserve the active bit */ 81 /* Always preserve the active bit */
82 irq->active = !!(val & ICH_LR_ACTIVE_BIT); 82 irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
119 vgic_irq_set_phys_active(irq, false); 119 vgic_irq_set_phys_active(irq, false);
120 } 120 }
121 121
122 spin_unlock(&irq->irq_lock); 122 raw_spin_unlock(&irq->irq_lock);
123 vgic_put_irq(vcpu->kvm, irq); 123 vgic_put_irq(vcpu->kvm, irq);
124 } 124 }
125 125
@@ -347,9 +347,9 @@ retry:
347 347
348 status = val & (1 << bit_nr); 348 status = val & (1 << bit_nr);
349 349
350 spin_lock_irqsave(&irq->irq_lock, flags); 350 raw_spin_lock_irqsave(&irq->irq_lock, flags);
351 if (irq->target_vcpu != vcpu) { 351 if (irq->target_vcpu != vcpu) {
352 spin_unlock_irqrestore(&irq->irq_lock, flags); 352 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
353 goto retry; 353 goto retry;
354 } 354 }
355 irq->pending_latch = status; 355 irq->pending_latch = status;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 870b1185173b..abd9c7352677 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
54 * When taking more than one ap_list_lock at the same time, always take the 54 * When taking more than one ap_list_lock at the same time, always take the
55 * lowest numbered VCPU's ap_list_lock first, so: 55 * lowest numbered VCPU's ap_list_lock first, so:
56 * vcpuX->vcpu_id < vcpuY->vcpu_id: 56 * vcpuX->vcpu_id < vcpuY->vcpu_id:
57 * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); 57 * raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
58 * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); 58 * raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
59 * 59 *
60 * Since the VGIC must support injecting virtual interrupts from ISRs, we have 60 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
61 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer 61 * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
62 * spinlocks for any lock that may be taken while injecting an interrupt. 62 * spinlocks for any lock that may be taken while injecting an interrupt.
63 */ 63 */
64 64
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
72 struct vgic_irq *irq = NULL; 72 struct vgic_irq *irq = NULL;
73 unsigned long flags; 73 unsigned long flags;
74 74
75 spin_lock_irqsave(&dist->lpi_list_lock, flags); 75 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
76 76
77 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 77 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
78 if (irq->intid != intid) 78 if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
88 irq = NULL; 88 irq = NULL;
89 89
90out_unlock: 90out_unlock:
91 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 91 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
92 92
93 return irq; 93 return irq;
94} 94}
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
138 if (irq->intid < VGIC_MIN_LPI) 138 if (irq->intid < VGIC_MIN_LPI)
139 return; 139 return;
140 140
141 spin_lock_irqsave(&dist->lpi_list_lock, flags); 141 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
142 if (!kref_put(&irq->refcount, vgic_irq_release)) { 142 if (!kref_put(&irq->refcount, vgic_irq_release)) {
143 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 143 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
144 return; 144 return;
145 }; 145 };
146 146
147 list_del(&irq->lpi_list); 147 list_del(&irq->lpi_list);
148 dist->lpi_list_count--; 148 dist->lpi_list_count--;
149 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 149 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
150 150
151 kfree(irq); 151 kfree(irq);
152} 152}
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
244 bool penda, pendb; 244 bool penda, pendb;
245 int ret; 245 int ret;
246 246
247 spin_lock(&irqa->irq_lock); 247 raw_spin_lock(&irqa->irq_lock);
248 spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); 248 raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
249 249
250 if (irqa->active || irqb->active) { 250 if (irqa->active || irqb->active) {
251 ret = (int)irqb->active - (int)irqa->active; 251 ret = (int)irqb->active - (int)irqa->active;
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
263 /* Both pending and enabled, sort by priority */ 263 /* Both pending and enabled, sort by priority */
264 ret = irqa->priority - irqb->priority; 264 ret = irqa->priority - irqb->priority;
265out: 265out:
266 spin_unlock(&irqb->irq_lock); 266 raw_spin_unlock(&irqb->irq_lock);
267 spin_unlock(&irqa->irq_lock); 267 raw_spin_unlock(&irqa->irq_lock);
268 return ret; 268 return ret;
269} 269}
270 270
@@ -325,7 +325,7 @@ retry:
325 * not need to be inserted into an ap_list and there is also 325 * not need to be inserted into an ap_list and there is also
326 * no more work for us to do. 326 * no more work for us to do.
327 */ 327 */
328 spin_unlock_irqrestore(&irq->irq_lock, flags); 328 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
329 329
330 /* 330 /*
331 * We have to kick the VCPU here, because we could be 331 * We have to kick the VCPU here, because we could be
@@ -347,12 +347,12 @@ retry:
347 * We must unlock the irq lock to take the ap_list_lock where 347 * We must unlock the irq lock to take the ap_list_lock where
348 * we are going to insert this new pending interrupt. 348 * we are going to insert this new pending interrupt.
349 */ 349 */
350 spin_unlock_irqrestore(&irq->irq_lock, flags); 350 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
351 351
352 /* someone can do stuff here, which we re-check below */ 352 /* someone can do stuff here, which we re-check below */
353 353
354 spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 354 raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
355 spin_lock(&irq->irq_lock); 355 raw_spin_lock(&irq->irq_lock);
356 356
357 /* 357 /*
358 * Did something change behind our backs? 358 * Did something change behind our backs?
@@ -367,10 +367,11 @@ retry:
367 */ 367 */
368 368
369 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { 369 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
370 spin_unlock(&irq->irq_lock); 370 raw_spin_unlock(&irq->irq_lock);
371 spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 371 raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
372 flags);
372 373
373 spin_lock_irqsave(&irq->irq_lock, flags); 374 raw_spin_lock_irqsave(&irq->irq_lock, flags);
374 goto retry; 375 goto retry;
375 } 376 }
376 377
@@ -382,8 +383,8 @@ retry:
382 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); 383 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
383 irq->vcpu = vcpu; 384 irq->vcpu = vcpu;
384 385
385 spin_unlock(&irq->irq_lock); 386 raw_spin_unlock(&irq->irq_lock);
386 spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 387 raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
387 388
388 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); 389 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
389 kvm_vcpu_kick(vcpu); 390 kvm_vcpu_kick(vcpu);
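
The hunks above only show the converted lock calls; as a reading aid, here is a condensed sketch (not a drop-in replacement) of the drop-and-retake pattern that vgic_queue_irq_unlock() implements. The refcounting and the "kick the current owner" case of the real function are elided; vgic_target_oracle() is the file's own helper, and the function is entered with irq->irq_lock held and the interrupt state saved in flags, exactly like the real one.

/*
 * Condensed sketch of the retry pattern in vgic_queue_irq_unlock()
 * (refcounting and some early-outs elided).  Entered with
 * irq->irq_lock held and irqs saved in 'flags'.
 */
static void example_queue_irq_unlock(struct vgic_irq *irq, unsigned long flags)
{
        struct kvm_vcpu *vcpu;

retry:
        vcpu = vgic_target_oracle(irq);
        if (irq->vcpu || !vcpu) {
                /* Already queued, or no valid target: nothing to do. */
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                return;
        }

        /*
         * Lock ordering forbids taking the ap_list_lock while holding
         * the irq_lock, so drop the irq_lock, take both locks in the
         * legal order, then re-validate the decision made above.
         */
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
        raw_spin_lock(&irq->irq_lock);

        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                /* The state changed while the locks were dropped: retry. */
                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
                                           flags);
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                goto retry;
        }

        /* Both locks held and the decision still valid: queue the IRQ. */
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;

        raw_spin_unlock(&irq->irq_lock);
        raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);
}
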
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
430 if (!irq) 431 if (!irq)
431 return -EINVAL; 432 return -EINVAL;
432 433
433 spin_lock_irqsave(&irq->irq_lock, flags); 434 raw_spin_lock_irqsave(&irq->irq_lock, flags);
434 435
435 if (!vgic_validate_injection(irq, level, owner)) { 436 if (!vgic_validate_injection(irq, level, owner)) {
436 /* Nothing to see here, move along... */ 437 /* Nothing to see here, move along... */
437 spin_unlock_irqrestore(&irq->irq_lock, flags); 438 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
438 vgic_put_irq(kvm, irq); 439 vgic_put_irq(kvm, irq);
439 return 0; 440 return 0;
440 } 441 }
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
494 495
495 BUG_ON(!irq); 496 BUG_ON(!irq);
496 497
497 spin_lock_irqsave(&irq->irq_lock, flags); 498 raw_spin_lock_irqsave(&irq->irq_lock, flags);
498 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); 499 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
499 spin_unlock_irqrestore(&irq->irq_lock, flags); 500 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
500 vgic_put_irq(vcpu->kvm, irq); 501 vgic_put_irq(vcpu->kvm, irq);
501 502
502 return ret; 503 return ret;
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
519 if (!irq->hw) 520 if (!irq->hw)
520 goto out; 521 goto out;
521 522
522 spin_lock_irqsave(&irq->irq_lock, flags); 523 raw_spin_lock_irqsave(&irq->irq_lock, flags);
523 irq->active = false; 524 irq->active = false;
524 irq->pending_latch = false; 525 irq->pending_latch = false;
525 irq->line_level = false; 526 irq->line_level = false;
526 spin_unlock_irqrestore(&irq->irq_lock, flags); 527 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
527out: 528out:
528 vgic_put_irq(vcpu->kvm, irq); 529 vgic_put_irq(vcpu->kvm, irq);
529} 530}
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
539 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 540 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
540 BUG_ON(!irq); 541 BUG_ON(!irq);
541 542
542 spin_lock_irqsave(&irq->irq_lock, flags); 543 raw_spin_lock_irqsave(&irq->irq_lock, flags);
543 kvm_vgic_unmap_irq(irq); 544 kvm_vgic_unmap_irq(irq);
544 spin_unlock_irqrestore(&irq->irq_lock, flags); 545 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
545 vgic_put_irq(vcpu->kvm, irq); 546 vgic_put_irq(vcpu->kvm, irq);
546 547
547 return 0; 548 return 0;
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
571 return -EINVAL; 572 return -EINVAL;
572 573
573 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 574 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
574 spin_lock_irqsave(&irq->irq_lock, flags); 575 raw_spin_lock_irqsave(&irq->irq_lock, flags);
575 if (irq->owner && irq->owner != owner) 576 if (irq->owner && irq->owner != owner)
576 ret = -EEXIST; 577 ret = -EEXIST;
577 else 578 else
578 irq->owner = owner; 579 irq->owner = owner;
579 spin_unlock_irqrestore(&irq->irq_lock, flags); 580 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
580 581
581 return ret; 582 return ret;
582} 583}
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
597 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 598 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
598 599
599retry: 600retry:
600 spin_lock(&vgic_cpu->ap_list_lock); 601 raw_spin_lock(&vgic_cpu->ap_list_lock);
601 602
602 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { 603 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
603 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; 604 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
604 bool target_vcpu_needs_kick = false; 605 bool target_vcpu_needs_kick = false;
605 606
606 spin_lock(&irq->irq_lock); 607 raw_spin_lock(&irq->irq_lock);
607 608
608 BUG_ON(vcpu != irq->vcpu); 609 BUG_ON(vcpu != irq->vcpu);
609 610
@@ -616,7 +617,7 @@ retry:
616 */ 617 */
617 list_del(&irq->ap_list); 618 list_del(&irq->ap_list);
618 irq->vcpu = NULL; 619 irq->vcpu = NULL;
619 spin_unlock(&irq->irq_lock); 620 raw_spin_unlock(&irq->irq_lock);
620 621
621 /* 622 /*
622 * This vgic_put_irq call matches the 623 * This vgic_put_irq call matches the
@@ -631,14 +632,14 @@ retry:
631 632
632 if (target_vcpu == vcpu) { 633 if (target_vcpu == vcpu) {
633 /* We're on the right CPU */ 634 /* We're on the right CPU */
634 spin_unlock(&irq->irq_lock); 635 raw_spin_unlock(&irq->irq_lock);
635 continue; 636 continue;
636 } 637 }
637 638
638 /* This interrupt looks like it has to be migrated. */ 639 /* This interrupt looks like it has to be migrated. */
639 640
640 spin_unlock(&irq->irq_lock); 641 raw_spin_unlock(&irq->irq_lock);
641 spin_unlock(&vgic_cpu->ap_list_lock); 642 raw_spin_unlock(&vgic_cpu->ap_list_lock);
642 643
643 /* 644 /*
644 * Ensure locking order by always locking the smallest 645 * Ensure locking order by always locking the smallest
@@ -652,10 +653,10 @@ retry:
652 vcpuB = vcpu; 653 vcpuB = vcpu;
653 } 654 }
654 655
655 spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); 656 raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
656 spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, 657 raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
657 SINGLE_DEPTH_NESTING); 658 SINGLE_DEPTH_NESTING);
658 spin_lock(&irq->irq_lock); 659 raw_spin_lock(&irq->irq_lock);
659 660
660 /* 661 /*
661 * If the affinity has been preserved, move the 662 * If the affinity has been preserved, move the
@@ -675,9 +676,9 @@ retry:
675 target_vcpu_needs_kick = true; 676 target_vcpu_needs_kick = true;
676 } 677 }
677 678
678 spin_unlock(&irq->irq_lock); 679 raw_spin_unlock(&irq->irq_lock);
679 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); 680 raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
680 spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); 681 raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
681 682
682 if (target_vcpu_needs_kick) { 683 if (target_vcpu_needs_kick) {
683 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); 684 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
687 goto retry; 688 goto retry;
688 } 689 }
689 690
690 spin_unlock(&vgic_cpu->ap_list_lock); 691 raw_spin_unlock(&vgic_cpu->ap_list_lock);
691} 692}
692 693
693static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) 694static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
741 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 742 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
742 int w; 743 int w;
743 744
744 spin_lock(&irq->irq_lock); 745 raw_spin_lock(&irq->irq_lock);
745 /* GICv2 SGIs can count for more than one... */ 746 /* GICv2 SGIs can count for more than one... */
746 w = vgic_irq_get_lr_count(irq); 747 w = vgic_irq_get_lr_count(irq);
747 spin_unlock(&irq->irq_lock); 748 raw_spin_unlock(&irq->irq_lock);
748 749
749 count += w; 750 count += w;
750 *multi_sgi |= (w > 1); 751 *multi_sgi |= (w > 1);
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
770 count = 0; 771 count = 0;
771 772
772 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 773 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
773 spin_lock(&irq->irq_lock); 774 raw_spin_lock(&irq->irq_lock);
774 775
775 /* 776 /*
776 * If we have multi-SGIs in the pipeline, we need to 777 * If we have multi-SGIs in the pipeline, we need to
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
780 * the AP list has been sorted already. 781 * the AP list has been sorted already.
781 */ 782 */
782 if (multi_sgi && irq->priority > prio) { 783 if (multi_sgi && irq->priority > prio) {
783 spin_unlock(&irq->irq_lock); 784 raw_spin_unlock(&irq->irq_lock);
784 break; 785 break;
785 } 786 }
786 787
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
791 prio = irq->priority; 792 prio = irq->priority;
792 } 793 }
793 794
794 spin_unlock(&irq->irq_lock); 795 raw_spin_unlock(&irq->irq_lock);
795 796
796 if (count == kvm_vgic_global_state.nr_lr) { 797 if (count == kvm_vgic_global_state.nr_lr) {
797 if (!list_is_last(&irq->ap_list, 798 if (!list_is_last(&irq->ap_list,
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
872 873
873 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 874 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
874 875
875 spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); 876 raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
876 vgic_flush_lr_state(vcpu); 877 vgic_flush_lr_state(vcpu);
877 spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); 878 raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
878 879
879 if (can_access_vgic_from_kernel()) 880 if (can_access_vgic_from_kernel())
880 vgic_restore_state(vcpu); 881 vgic_restore_state(vcpu);
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
918 919
919 vgic_get_vmcr(vcpu, &vmcr); 920 vgic_get_vmcr(vcpu, &vmcr);
920 921
921 spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); 922 raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
922 923
923 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 924 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
924 spin_lock(&irq->irq_lock); 925 raw_spin_lock(&irq->irq_lock);
925 pending = irq_is_pending(irq) && irq->enabled && 926 pending = irq_is_pending(irq) && irq->enabled &&
926 !irq->active && 927 !irq->active &&
927 irq->priority < vmcr.pmr; 928 irq->priority < vmcr.pmr;
928 spin_unlock(&irq->irq_lock); 929 raw_spin_unlock(&irq->irq_lock);
929 930
930 if (pending) 931 if (pending)
931 break; 932 break;
932 } 933 }
933 934
934 spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); 935 raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
935 936
936 return pending; 937 return pending;
937} 938}
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
963 return false; 964 return false;
964 965
965 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 966 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
966 spin_lock_irqsave(&irq->irq_lock, flags); 967 raw_spin_lock_irqsave(&irq->irq_lock, flags);
967 map_is_active = irq->hw && irq->active; 968 map_is_active = irq->hw && irq->active;
968 spin_unlock_irqrestore(&irq->irq_lock, flags); 969 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
969 vgic_put_irq(vcpu->kvm, irq); 970 vgic_put_irq(vcpu->kvm, irq);
970 971
971 return map_is_active; 972 return map_is_active;
972} 973}
973